[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:15.926-0400 Starting JSTest jstests/concurrency/fsm_all_sharded_replication.js... ./mongo --eval MongoRunner.dataDir = "/data/db/job0/mongorunner"; TestData = new Object(); TestData.wiredTigerEngineConfigString = ""; TestData.wiredTigerCollectionConfigString = ""; TestData.storageEngine = "wiredTiger"; TestData.wiredTigerIndexConfigString = ""; TestData.noJournal = false; TestData.testName = "fsm_all_sharded_replication"; TestData.noJournalPrealloc = false; MongoRunner.dataPath = "/data/db/job0/mongorunner/" --nodb jstests/concurrency/fsm_all_sharded_replication.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:15.936-0400 JSTest jstests/concurrency/fsm_all_sharded_replication.js started with pid 2877.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:15.947-0400 MongoDB shell version: 3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:15.985-0400 /data/db/job0/mongorunner/
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.113-0400 Replica set test!
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.113-0400 ReplSetTest Starting Set
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.114-0400 ReplSetTest n is : 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.115-0400 ReplSetTest n: 0 ports: [ 31100, 31101, 31102 ] 31100 number
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.115-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.115-0400     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "port" : 31100,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "replSet" : "test-rs0",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.116-0400     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400         "shard" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400         "node" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400         "set" : "test-rs0"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400     },
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.117-0400 Resetting db path '/data/db/job0/mongorunner/test-rs0-0'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.122-0400 2015-07-09T13:55:16.119-0400 I - [main] shell: started program (sh2878): /data/mci/src/mongod --oplogSize 1024 --port 31100 --noprealloc --smallfiles --rest --replSet test-rs0 --dbpath /data/db/job0/mongorunner/test-rs0-0 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.125-0400 2015-07-09T13:55:16.125-0400 W NETWORK [main] Failed to connect to 127.0.0.1:31100, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.136-0400 m31100| 2015-07-09T13:55:16.134-0400 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.136-0400 m31100| 2015-07-09T13:55:16.136-0400 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.136-0400 m31100| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.159-0400 m31100| 2015-07-09T13:55:16.158-0400 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=1G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.233-0400 m31100| 2015-07-09T13:55:16.232-0400 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.233-0400 m31100| 2015-07-09T13:55:16.232-0400 I CONTROL [initandlisten] MongoDB starting : pid=2878 port=31100 dbpath=/data/db/job0/mongorunner/test-rs0-0 64-bit host=bs-osx108-8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.233-0400 m31100| 2015-07-09T13:55:16.232-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.233-0400 m31100| 2015-07-09T13:55:16.233-0400 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.234-0400 m31100| 2015-07-09T13:55:16.233-0400 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.234-0400 m31100| 2015-07-09T13:55:16.233-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.234-0400 m31100| 2015-07-09T13:55:16.233-0400 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.234-0400 m31100| 2015-07-09T13:55:16.233-0400 I CONTROL [initandlisten] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.234-0400 m31100| 2015-07-09T13:55:16.233-0400 I CONTROL [initandlisten] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.234-0400 m31100| 2015-07-09T13:55:16.233-0400 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31100 }, replication: { oplogSizeMB: 1024, replSet: "test-rs0" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs0-0", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.235-0400 m31100| 2015-07-09T13:55:16.234-0400 I NETWORK [websvr] admin web console waiting for connections on port 32100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.273-0400 m31100| 2015-07-09T13:55:16.273-0400 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.274-0400 m31100| 2015-07-09T13:55:16.273-0400 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.315-0400 m31100| 2015-07-09T13:55:16.315-0400 I NETWORK [initandlisten] waiting for connections on port 31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.327-0400 m31100| 2015-07-09T13:55:16.326-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62477 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.328-0400 [ connection to bs-osx108-8:31100 ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.328-0400 ReplSetTest n is : 1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.329-0400 ReplSetTest n: 1 ports: [ 31100, 31101, 31102 ] 31101 number
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.329-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.329-0400     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.329-0400     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.329-0400     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.330-0400     "port" : 31101,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.330-0400     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.330-0400     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.330-0400     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.331-0400     "replSet" : "test-rs0",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.331-0400     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.331-0400     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.331-0400     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400         "shard" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400         "node" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400         "set" : "test-rs0"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400     },
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.332-0400     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.333-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.333-0400 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.333-0400 Resetting db path '/data/db/job0/mongorunner/test-rs0-1'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.333-0400 2015-07-09T13:55:16.333-0400 I - [main] shell: started program (sh2879): /data/mci/src/mongod --oplogSize 1024 --port 31101 --noprealloc --smallfiles --rest --replSet test-rs0 --dbpath /data/db/job0/mongorunner/test-rs0-1 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.335-0400 2015-07-09T13:55:16.334-0400 W NETWORK [main] Failed to connect to 127.0.0.1:31101, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.349-0400 m31101| 2015-07-09T13:55:16.347-0400 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.350-0400 m31101| 2015-07-09T13:55:16.349-0400 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.350-0400 m31101| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.373-0400 m31101| 2015-07-09T13:55:16.372-0400 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=1G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.422-0400 m31101| 2015-07-09T13:55:16.422-0400 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.423-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten] MongoDB starting : pid=2879 port=31101 dbpath=/data/db/job0/mongorunner/test-rs0-1 64-bit host=bs-osx108-8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.423-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.423-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.423-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.423-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.423-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.423-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.424-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.424-0400 m31101| 2015-07-09T13:55:16.422-0400 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31101 }, replication: { oplogSizeMB: 1024, replSet: "test-rs0" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs0-1", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.424-0400 m31101| 2015-07-09T13:55:16.424-0400 I NETWORK [websvr] admin web console waiting for connections on port 32101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.482-0400 m31101| 2015-07-09T13:55:16.481-0400 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.482-0400 m31101| 2015-07-09T13:55:16.481-0400 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.498-0400 m31101| 2015-07-09T13:55:16.498-0400 I NETWORK [initandlisten] waiting for connections on port 31101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.536-0400 m31101| 2015-07-09T13:55:16.536-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62479 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.537-0400 [ connection to bs-osx108-8:31100, connection to bs-osx108-8:31101 ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.538-0400 ReplSetTest n is : 2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.538-0400 ReplSetTest n: 2 ports: [ 31100, 31101, 31102 ] 31102 number
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.540-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.540-0400     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.540-0400     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.541-0400     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.541-0400     "port" : 31102,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.541-0400     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.541-0400     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.541-0400     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.541-0400     "replSet" : "test-rs0",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.541-0400     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.542-0400     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.542-0400     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.542-0400     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.542-0400         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.542-0400         "shard" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.542-0400         "node" : 2,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.542-0400         "set" : "test-rs0"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.543-0400     },
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.543-0400     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.543-0400     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.543-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.543-0400 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.543-0400 Resetting db path '/data/db/job0/mongorunner/test-rs0-2'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.544-0400 2015-07-09T13:55:16.541-0400 I - [main] shell: started program (sh2880): /data/mci/src/mongod --oplogSize 1024 --port 31102 --noprealloc --smallfiles --rest --replSet test-rs0 --dbpath /data/db/job0/mongorunner/test-rs0-2 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.544-0400 2015-07-09T13:55:16.542-0400 W NETWORK [main] Failed to connect to 127.0.0.1:31102, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.557-0400 m31102| 2015-07-09T13:55:16.555-0400 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.558-0400 m31102| 2015-07-09T13:55:16.557-0400 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.558-0400 m31102| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.582-0400 m31102| 2015-07-09T13:55:16.582-0400 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=1G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.706-0400 m31102| 2015-07-09T13:55:16.706-0400 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.706-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten] MongoDB starting : pid=2880 port=31102 dbpath=/data/db/job0/mongorunner/test-rs0-2 64-bit host=bs-osx108-8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.707-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.707-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.707-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.707-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.707-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.708-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.708-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.708-0400 m31102| 2015-07-09T13:55:16.706-0400 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31102 }, replication: { oplogSizeMB: 1024, replSet: "test-rs0" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs0-2", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.708-0400 m31102| 2015-07-09T13:55:16.707-0400 I NETWORK [websvr] admin web console waiting for connections on port 32102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.741-0400 m31102| 2015-07-09T13:55:16.741-0400 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.742-0400 m31102| 2015-07-09T13:55:16.741-0400 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.778-0400 m31102| 2015-07-09T13:55:16.777-0400 I NETWORK [initandlisten] waiting for connections on port 31102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.840-0400 m31102| 2015-07-09T13:55:16.840-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62481 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.841-0400 [
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.841-0400     connection to bs-osx108-8:31100,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.841-0400     connection to bs-osx108-8:31101,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.841-0400     connection to bs-osx108-8:31102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.841-0400 ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.842-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.842-0400     "replSetInitiate" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.842-0400         "_id" : "test-rs0",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.842-0400         "members" : [
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.842-0400             {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.842-0400                 "_id" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.842-0400                 "host" : "bs-osx108-8:31100"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400             },
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400             {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400                 "_id" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400                 "host" : "bs-osx108-8:31101"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400             },
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400             {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400                 "_id" : 2,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400                 "host" : "bs-osx108-8:31102"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400             }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400         ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400     }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.843-0400 m31100| 2015-07-09T13:55:16.843-0400 I REPL [conn1] replSetInitiate admin command received from client
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.844-0400 m31101| 2015-07-09T13:55:16.844-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62482 #2 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.844-0400 m31101| 2015-07-09T13:55:16.844-0400 I NETWORK [conn2] end connection 127.0.0.1:62482 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.844-0400 m31102| 2015-07-09T13:55:16.844-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62483 #2 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.845-0400 m31100| 2015-07-09T13:55:16.845-0400 I REPL [conn1] replSetInitiate config object with 3 members parses ok
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.845-0400 m31102| 2015-07-09T13:55:16.845-0400 I NETWORK [conn2] end connection 127.0.0.1:62483 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.845-0400 m31101| 2015-07-09T13:55:16.845-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62484 #3 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.846-0400 m31102| 2015-07-09T13:55:16.846-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62485 #3 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.847-0400 m31100| 2015-07-09T13:55:16.847-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62486 #2 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.848-0400 m31100| 2015-07-09T13:55:16.848-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62487 #3 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.860-0400 m31100| 2015-07-09T13:55:16.858-0400 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs0", version: 1, members: [ { _id: 0, host: "bs-osx108-8:31100", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "bs-osx108-8:31101", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "bs-osx108-8:31102", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.860-0400 m31100| 2015-07-09T13:55:16.858-0400 I REPL [ReplicationExecutor] This node is bs-osx108-8:31100 in the config
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.860-0400 m31100| 2015-07-09T13:55:16.858-0400 I REPL [ReplicationExecutor] transition to STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.861-0400 m31100| 2015-07-09T13:55:16.858-0400 I REPL [conn1] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.861-0400 m31100| 2015-07-09T13:55:16.858-0400 I REPL [conn1] creating replication oplog of size: 1024MB...
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.861-0400 m31100| 2015-07-09T13:55:16.859-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31101 is now in state STARTUP
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.861-0400 m31100| 2015-07-09T13:55:16.861-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31102 is now in state STARTUP
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:16.865-0400 m31100| 2015-07-09T13:55:16.865-0400 I STORAGE [conn1] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:17.090-0400 m31100| 2015-07-09T13:55:17.089-0400 I REPL [conn1] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:17.091-0400 m31100| 2015-07-09T13:55:17.090-0400 I REPL [conn1] Starting replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:17.093-0400 m31100| 2015-07-09T13:55:17.090-0400 I COMMAND [conn1] command local.oplog.rs command: replSetInitiate { replSetInitiate: { _id: "test-rs0", members: [ { _id: 0.0, host: "bs-osx108-8:31100" }, { _id: 1.0, host: "bs-osx108-8:31101" }, { _id: 2.0, host: "bs-osx108-8:31102" } ] } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:22 locks:{ Global: { acquireCount: { r: 5, w: 3, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 358 } }, Database: { acquireCount: { w: 1, W: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 247ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:17.093-0400 m31100| 2015-07-09T13:55:17.091-0400 I REPL [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:17.093-0400 m31100| 2015-07-09T13:55:17.093-0400 I REPL [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.852-0400 m31100| 2015-07-09T13:55:18.852-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62488 #4 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.853-0400 m31100| 2015-07-09T13:55:18.852-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62489 #5 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.853-0400 m31100| 2015-07-09T13:55:18.853-0400 I NETWORK [conn4] end connection 127.0.0.1:62488 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.853-0400 m31100| 2015-07-09T13:55:18.853-0400 I NETWORK [conn5] end connection 127.0.0.1:62489 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.854-0400 m31101| 2015-07-09T13:55:18.853-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62490 #4 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.854-0400 m31102| 2015-07-09T13:55:18.854-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62491 #4 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.855-0400 m31101| 2015-07-09T13:55:18.854-0400 I NETWORK [conn4] end connection 127.0.0.1:62490 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.855-0400 m31102| 2015-07-09T13:55:18.855-0400 I NETWORK [conn4] end connection 127.0.0.1:62491 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.862-0400 m31100| 2015-07-09T13:55:18.861-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.862-0400 m31100| 2015-07-09T13:55:18.862-0400 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.862-0400 m31100| 2015-07-09T13:55:18.862-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.863-0400 m31100| 2015-07-09T13:55:18.863-0400 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.872-0400 m31102| 2015-07-09T13:55:18.871-0400 I REPL [replExecDBWorker-2] Starting replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.872-0400 m31102| 2015-07-09T13:55:18.872-0400 W REPL [rsSync] did not receive a valid config yet, sleeping 5 seconds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.872-0400 m31101| 2015-07-09T13:55:18.872-0400 I REPL [replExecDBWorker-0] Starting replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.873-0400 m31102| 2015-07-09T13:55:18.872-0400 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs0", version: 1, members: [ { _id: 0, host: "bs-osx108-8:31100", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "bs-osx108-8:31101", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "bs-osx108-8:31102", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.873-0400 m31102| 2015-07-09T13:55:18.872-0400 I REPL [ReplicationExecutor] This node is bs-osx108-8:31102 in the config
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.873-0400 m31102| 2015-07-09T13:55:18.872-0400 I REPL [ReplicationExecutor] transition to STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.874-0400 m31101| 2015-07-09T13:55:18.872-0400 W REPL [rsSync] did not receive a valid config yet, sleeping 5 seconds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.874-0400 m31101| 2015-07-09T13:55:18.872-0400 I REPL [ReplicationExecutor] New replica set config in use: { _id: "test-rs0", version: 1, members: [ { _id: 0, host: "bs-osx108-8:31100", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "bs-osx108-8:31101", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "bs-osx108-8:31102", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.874-0400 m31101| 2015-07-09T13:55:18.872-0400 I REPL [ReplicationExecutor] This node is bs-osx108-8:31101 in the config
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.874-0400 m31101| 2015-07-09T13:55:18.872-0400 I REPL [ReplicationExecutor] transition to STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.875-0400 m31101| 2015-07-09T13:55:18.873-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31100 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.875-0400 m31102| 2015-07-09T13:55:18.873-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31100 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.875-0400 m31102| 2015-07-09T13:55:18.873-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62493 #5 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.875-0400 m31101| 2015-07-09T13:55:18.873-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62492 #5 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.875-0400 m31101| 2015-07-09T13:55:18.874-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31102 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:18.875-0400 m31102| 2015-07-09T13:55:18.874-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31101 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.864-0400 m31100| 2015-07-09T13:55:20.864-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31101 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.865-0400 m31100| 2015-07-09T13:55:20.864-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.865-0400 m31100| 2015-07-09T13:55:20.864-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31102 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.865-0400 m31100| 2015-07-09T13:55:20.865-0400 I REPL [ReplicationExecutor] running for election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.866-0400 m31102| 2015-07-09T13:55:20.866-0400 I REPL [ReplicationExecutor] replSetElect voting yea for bs-osx108-8:31100 (0)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.866-0400 m31101| 2015-07-09T13:55:20.866-0400 I REPL [ReplicationExecutor] replSetElect voting yea for bs-osx108-8:31100 (0)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.866-0400 m31100| 2015-07-09T13:55:20.866-0400 I REPL [ReplicationExecutor] received vote: 1 votes from bs-osx108-8:31102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.866-0400 m31100| 2015-07-09T13:55:20.866-0400 I REPL [ReplicationExecutor] election succeeded, assuming primary role
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.866-0400 m31100| 2015-07-09T13:55:20.866-0400 I REPL [ReplicationExecutor] transition to PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.874-0400 m31102| 2015-07-09T13:55:20.874-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31100 is now in state PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:20.875-0400 m31101| 2015-07-09T13:55:20.874-0400 I REPL [ReplicationExecutor] Member bs-osx108-8:31100 is now in state PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:21.097-0400 m31100| 2015-07-09T13:55:21.096-0400 I REPL [rsSync] transition to primary complete; database writes are now permitted
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:23.873-0400 m31102| 2015-07-09T13:55:23.873-0400 I REPL [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:23.874-0400 m31102| 2015-07-09T13:55:23.873-0400 I REPL [rsSync] creating replication oplog of size: 1024MB...
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:23.874-0400 m31101| 2015-07-09T13:55:23.873-0400 I REPL [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:23.874-0400 m31101| 2015-07-09T13:55:23.873-0400 I REPL [rsSync] creating replication oplog of size: 1024MB...
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:23.879-0400 m31102| 2015-07-09T13:55:23.879-0400 I STORAGE [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:23.880-0400 m31101| 2015-07-09T13:55:23.880-0400 I STORAGE [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.290-0400 m31102| 2015-07-09T13:55:24.289-0400 I REPL [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.290-0400 m31102| 2015-07-09T13:55:24.289-0400 I REPL [rsSync] initial sync pending
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.295-0400 m31101| 2015-07-09T13:55:24.295-0400 I REPL [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.295-0400 m31101| 2015-07-09T13:55:24.295-0400 I REPL [rsSync] initial sync pending
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.311-0400 m31102| 2015-07-09T13:55:24.311-0400 I REPL [ReplicationExecutor] syncing from: bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.311-0400 m31101| 2015-07-09T13:55:24.311-0400 I REPL [ReplicationExecutor] syncing from: bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.313-0400 m31100| 2015-07-09T13:55:24.312-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62494 #6 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.313-0400 m31100| 2015-07-09T13:55:24.312-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62495 #7 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.315-0400 m31101| 2015-07-09T13:55:24.315-0400 I REPL [rsSync] initial sync drop all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.315-0400 m31101| 2015-07-09T13:55:24.315-0400 I STORAGE [rsSync] dropAllDatabasesExceptLocal 1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.316-0400 m31101| 2015-07-09T13:55:24.315-0400 I REPL [rsSync] initial sync clone all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.316-0400 m31102| 2015-07-09T13:55:24.315-0400 I REPL [rsSync] initial sync drop all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.316-0400 m31102| 2015-07-09T13:55:24.315-0400 I STORAGE [rsSync] dropAllDatabasesExceptLocal 1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.316-0400 m31102| 2015-07-09T13:55:24.315-0400 I REPL [rsSync] initial sync clone all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.316-0400 m31101| 2015-07-09T13:55:24.316-0400 I REPL [rsSync] initial sync data copy, starting syncup
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.316-0400 m31102| 2015-07-09T13:55:24.316-0400 I REPL [rsSync] initial sync data copy, starting syncup
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.316-0400 m31102| 2015-07-09T13:55:24.316-0400 I REPL [rsSync] oplog sync 1 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.317-0400 m31101| 2015-07-09T13:55:24.316-0400 I REPL [rsSync] oplog sync 1 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.317-0400 m31101| 2015-07-09T13:55:24.316-0400 I REPL [rsSync] oplog sync 2 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.317-0400 m31102| 2015-07-09T13:55:24.316-0400 I REPL [rsSync] oplog sync 2 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.317-0400 m31101| 2015-07-09T13:55:24.317-0400 I REPL [rsSync] initial sync building indexes
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.317-0400 m31101| 2015-07-09T13:55:24.317-0400 I REPL [rsSync] oplog sync 3 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.318-0400 m31102| 2015-07-09T13:55:24.317-0400 I REPL [rsSync] initial sync building indexes
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.318-0400 m31102| 2015-07-09T13:55:24.317-0400 I REPL [rsSync] oplog sync 3 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.318-0400 m31101| 2015-07-09T13:55:24.318-0400 I REPL [rsSync] initial sync finishing up
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.318-0400 m31101| 2015-07-09T13:55:24.318-0400 I REPL [rsSync] set minValid=(term: -1, timestamp: Jul 9 13:55:17:1)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.319-0400 m31102| 2015-07-09T13:55:24.318-0400 I REPL [rsSync] initial sync finishing up
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.319-0400 m31102| 2015-07-09T13:55:24.318-0400 I REPL [rsSync] set minValid=(term: -1, timestamp: Jul 9 13:55:17:1)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.319-0400 m31101| 2015-07-09T13:55:24.319-0400 I REPL [rsSync] initial sync done
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.319-0400 m31102| 2015-07-09T13:55:24.319-0400 I REPL [rsSync] initial sync done
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.322-0400 m31102| 2015-07-09T13:55:24.321-0400 I REPL [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.322-0400 m31100| 2015-07-09T13:55:24.321-0400 I NETWORK [conn6] end connection 127.0.0.1:62494 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.322-0400 m31101| 2015-07-09T13:55:24.321-0400 I REPL [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.322-0400 m31100| 2015-07-09T13:55:24.321-0400 I NETWORK [conn7] end connection 127.0.0.1:62495 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.322-0400 m31102| 2015-07-09T13:55:24.322-0400 I REPL [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.324-0400 m31101| 2015-07-09T13:55:24.323-0400 I REPL [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.465-0400 Replica set test!
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.465-0400 ReplSetTest Starting Set
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.465-0400 ReplSetTest n is : 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.465-0400 ReplSetTest n: 0 ports: [ 31200, 31201, 31202 ] 31200 number
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.467-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.467-0400     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.467-0400     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.467-0400     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.467-0400     "port" : 31200,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.468-0400     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.468-0400     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.468-0400     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.468-0400     "replSet" : "test-rs1",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400         "shard" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400         "node" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400         "set" : "test-rs1"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400     },
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.469-0400     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.470-0400     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.470-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.470-0400 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.470-0400 Resetting db path '/data/db/job0/mongorunner/test-rs1-0'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.470-0400 2015-07-09T13:55:24.470-0400 I - [main] shell: started program (sh2883): /data/mci/src/mongod --oplogSize 1024 --port 31200 --noprealloc --smallfiles --rest --replSet test-rs1 --dbpath /data/db/job0/mongorunner/test-rs1-0 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.471-0400 2015-07-09T13:55:24.471-0400 W NETWORK [main] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.484-0400 m31200| 2015-07-09T13:55:24.482-0400 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.484-0400 m31200| 2015-07-09T13:55:24.484-0400 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.485-0400 m31200| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.508-0400 m31200| 2015-07-09T13:55:24.507-0400 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=1G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.557-0400 m31200| 2015-07-09T13:55:24.556-0400 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.557-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten] MongoDB starting : pid=2883 port=31200 dbpath=/data/db/job0/mongorunner/test-rs1-0 64-bit host=bs-osx108-8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.557-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.557-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.558-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.558-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.558-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.558-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.558-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.559-0400 m31200| 2015-07-09T13:55:24.557-0400 I CONTROL [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31200 }, replication: { oplogSizeMB: 1024, replSet: "test-rs1" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs1-0", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.559-0400 m31200| 2015-07-09T13:55:24.557-0400 I NETWORK [websvr] admin web console waiting for connections on port 32200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.594-0400 m31200| 2015-07-09T13:55:24.594-0400 I REPL [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.595-0400 m31200| 2015-07-09T13:55:24.594-0400 I REPL [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.629-0400 m31200| 2015-07-09T13:55:24.629-0400 I NETWORK [initandlisten] waiting for connections on port 31200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.673-0400 m31200| 2015-07-09T13:55:24.673-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62497 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.675-0400 [ connection to bs-osx108-8:31200 ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.675-0400 ReplSetTest n is : 1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.675-0400 ReplSetTest n: 1 ports: [ 31200, 31201, 31202 ] 31201 number
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.676-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.676-0400     "useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.676-0400     "oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.676-0400     "keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.676-0400     "port" : 31201,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.677-0400     "noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.677-0400     "smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.677-0400     "rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.677-0400     "replSet" : "test-rs1",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.677-0400     "dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.677-0400     "useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.677-0400     "noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.678-0400     "pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.678-0400         "testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.678-0400         "shard" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.678-0400         "node" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.678-0400         "set" : "test-rs1"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.678-0400     },
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.679-0400     "verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.679-0400     "restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.679-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.679-0400 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.679-0400 Resetting db path '/data/db/job0/mongorunner/test-rs1-1'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.681-0400 2015-07-09T13:55:24.680-0400 I - [main] shell: started program (sh2884): /data/mci/src/mongod --oplogSize 1024 --port 31201 --noprealloc --smallfiles --rest --replSet test-rs1 --dbpath /data/db/job0/mongorunner/test-rs1-1 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.681-0400 2015-07-09T13:55:24.681-0400 W NETWORK [main] Failed to connect to 127.0.0.1:31201, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.694-0400 m31201| 2015-07-09T13:55:24.692-0400 I CONTROL [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.694-0400 m31201| 2015-07-09T13:55:24.694-0400 I CONTROL [main] ** enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.695-0400 m31201| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.719-0400 m31201| 2015-07-09T13:55:24.718-0400 I STORAGE [initandlisten] wiredtiger_open config: create,cache_size=1G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.768-0400 m31201| 2015-07-09T13:55:24.767-0400 W STORAGE [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.769-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL [initandlisten] MongoDB starting : pid=2884 port=31201 dbpath=/data/db/job0/mongorunner/test-rs1-1 64-bit host=bs-osx108-8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.769-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.769-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.769-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL [initandlisten] ** Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.769-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL  [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.770-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL  [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.770-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL  [initandlisten] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.770-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL  [initandlisten] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.770-0400 m31201| 2015-07-09T13:55:24.768-0400 I CONTROL  [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31201 }, replication: { oplogSizeMB: 1024, replSet: "test-rs1" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs1-1", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.770-0400 m31201| 2015-07-09T13:55:24.769-0400 I NETWORK  [websvr] admin web console waiting for connections on port 32201
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.809-0400 m31201| 2015-07-09T13:55:24.809-0400 I REPL     [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.810-0400 m31201| 2015-07-09T13:55:24.809-0400 I REPL     [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.854-0400 m31201| 2015-07-09T13:55:24.853-0400 I NETWORK  [initandlisten] waiting for connections on port 31201
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.870-0400 m31100| 2015-07-09T13:55:24.870-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31102 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.870-0400 m31100| 2015-07-09T13:55:24.870-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31101 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.878-0400 m31102| 2015-07-09T13:55:24.877-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31101 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.878-0400 m31102| 2015-07-09T13:55:24.878-0400 I REPL     [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.878-0400 m31101| 2015-07-09T13:55:24.878-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31102 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.879-0400 m31101| 2015-07-09T13:55:24.878-0400 I REPL     [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.884-0400 m31201| 2015-07-09T13:55:24.883-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62499 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.885-0400 [ connection to bs-osx108-8:31200, connection to bs-osx108-8:31201 ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.885-0400 ReplSetTest n is : 2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.886-0400 ReplSetTest n: 2 ports: [ 31200, 31201, 31202 ] 31202 number
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.886-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.886-0400 	"useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.886-0400 	"oplogSize" : 1024,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.886-0400 	"keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.886-0400 	"port" : 31202,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.887-0400 	"noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.887-0400 	"smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.887-0400 	"rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.887-0400 	"replSet" : "test-rs1",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.887-0400 	"dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.887-0400 	"useHostname" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 	"noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 	"pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 		"testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 		"shard" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 		"node" : 2,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 		"set" : "test-rs1"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 	},
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.888-0400 	"verbose" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.889-0400 	"restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.889-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.889-0400 ReplSetTest Starting....
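Editor's note: the options block above is what ReplSetTest prints for each node just before it spawns the corresponding mongod. For reference, a set like this is normally driven from shell JavaScript roughly as follows; this is a minimal sketch assuming the ReplSetTest helper from the mongo shell's test library, not this suite's exact code (the suite builds its nodes through ShardingTest):

    // Minimal sketch: start a 3-node replica set the way the harness above does.
    var rst = new ReplSetTest({name: "test-rs1", nodes: 3, oplogSize: 1024});
    rst.startSet();             // spawns one mongod per node (ports like 31200-31202)
    rst.initiate();             // runs replSetInitiate against the first node
    rst.awaitSecondaryNodes();  // waits for the non-primary members to reach SECONDARY
    var primary = rst.getPrimary();
    // ... run the workload ...
    rst.stopSet();              // shuts all members down
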
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.889-0400 Resetting db path '/data/db/job0/mongorunner/test-rs1-2'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.890-0400 2015-07-09T13:55:24.889-0400 I -        [main] shell: started program (sh2885): /data/mci/src/mongod --oplogSize 1024 --port 31202 --noprealloc --smallfiles --rest --replSet test-rs1 --dbpath /data/db/job0/mongorunner/test-rs1-2 --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.891-0400 2015-07-09T13:55:24.891-0400 W NETWORK  [main] Failed to connect to 127.0.0.1:31202, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.907-0400 m31202| 2015-07-09T13:55:24.905-0400 I CONTROL  [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.907-0400 m31202| 2015-07-09T13:55:24.907-0400 I CONTROL  [main] **          enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.908-0400 m31202| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.932-0400 m31202| 2015-07-09T13:55:24.932-0400 I STORAGE  [initandlisten] wiredtiger_open config: create,cache_size=1G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.986-0400 m31202| 2015-07-09T13:55:24.986-0400 W STORAGE  [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten] MongoDB starting : pid=2885 port=31202 dbpath=/data/db/job0/mongorunner/test-rs1-2 64-bit host=bs-osx108-8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten] **       Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.987-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.988-0400 m31202| 2015-07-09T13:55:24.986-0400 I CONTROL  [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 31202 }, replication: { oplogSizeMB: 1024, replSet: "test-rs1" }, setParameter: { enableTestCommands: "1" }, storage: { dbPath: "/data/db/job0/mongorunner/test-rs1-2", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:24.988-0400 m31202| 2015-07-09T13:55:24.987-0400 I NETWORK  [websvr] admin web console waiting for connections on port 32202
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.024-0400 m31202| 2015-07-09T13:55:25.024-0400 I REPL     [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.025-0400 m31202| 2015-07-09T13:55:25.024-0400 I REPL     [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.060-0400 m31202| 2015-07-09T13:55:25.059-0400 I NETWORK  [initandlisten] waiting for connections on port 31202
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.094-0400 m31202| 2015-07-09T13:55:25.094-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62501 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.096-0400 [
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.096-0400 	connection to bs-osx108-8:31200,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.096-0400 	connection to bs-osx108-8:31201,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.096-0400 	connection to bs-osx108-8:31202
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.096-0400 ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 	"replSetInitiate" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 		"_id" : "test-rs1",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 		"members" : [
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 			{
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 				"_id" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 				"host" : "bs-osx108-8:31200"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 			},
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.097-0400 			{
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 				"_id" : 1,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 				"host" : "bs-osx108-8:31201"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 			},
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 			{
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 				"_id" : 2,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 				"host" : "bs-osx108-8:31202"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 			}
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 		]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 	}
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.098-0400 m31200| 2015-07-09T13:55:25.097-0400 I REPL     [conn1] replSetInitiate admin command received from client
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.102-0400 m31201| 2015-07-09T13:55:25.101-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62502 #2 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.103-0400 m31201| 2015-07-09T13:55:25.103-0400 I NETWORK  [conn2] end connection 127.0.0.1:62502 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.103-0400 m31202| 2015-07-09T13:55:25.103-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62503 #2 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.104-0400 m31200| 2015-07-09T13:55:25.104-0400 I REPL     [conn1] replSetInitiate config object with 3 members parses ok
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.105-0400 m31202| 2015-07-09T13:55:25.104-0400 I NETWORK  [conn2] end connection 127.0.0.1:62503 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.106-0400 m31201| 2015-07-09T13:55:25.106-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62505 #3 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.107-0400 m31202| 2015-07-09T13:55:25.106-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62504 #3 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.110-0400 m31200| 2015-07-09T13:55:25.109-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62506 #2 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.110-0400 m31200| 2015-07-09T13:55:25.110-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62507 #3 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.124-0400 m31200| 2015-07-09T13:55:25.123-0400 I REPL     [ReplicationExecutor] New replica set config in use: { _id: "test-rs1", version: 1, members: [ { _id: 0, host: "bs-osx108-8:31200", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "bs-osx108-8:31201", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "bs-osx108-8:31202", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.125-0400 m31200| 2015-07-09T13:55:25.124-0400 I REPL     [ReplicationExecutor] This node is bs-osx108-8:31200 in the config
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.125-0400 m31200| 2015-07-09T13:55:25.124-0400 I REPL     [ReplicationExecutor] transition to STARTUP2
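Editor's note: the replSetInitiate document printed above is an ordinary admin command. Issued by hand against one of the new members it would look roughly like this (a sketch mirroring the document in the log, not the suite's own code):

    // Sketch: initiate the set manually from a shell connected to one member.
    var conn = new Mongo("bs-osx108-8:31200");
    assert.commandWorked(conn.getDB("admin").runCommand({
        replSetInitiate: {
            _id: "test-rs1",
            members: [
                { _id: 0, host: "bs-osx108-8:31200" },
                { _id: 1, host: "bs-osx108-8:31201" },
                { _id: 2, host: "bs-osx108-8:31202" }
            ]
        }
    }));
    // The "replSetInitiate admin command received from client" lines above are
    // the server-side half of exactly this call.
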
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.125-0400 m31200| 2015-07-09T13:55:25.124-0400 I REPL     [conn1] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.125-0400 m31200| 2015-07-09T13:55:25.124-0400 I REPL     [conn1] creating replication oplog of size: 1024MB...
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.125-0400 m31200| 2015-07-09T13:55:25.124-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31201 is now in state STARTUP
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.125-0400 m31200| 2015-07-09T13:55:25.124-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31202 is now in state STARTUP
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.129-0400 m31200| 2015-07-09T13:55:25.129-0400 I STORAGE  [conn1] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.357-0400 m31200| 2015-07-09T13:55:25.357-0400 I REPL     [conn1] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.358-0400 m31200| 2015-07-09T13:55:25.358-0400 I REPL     [conn1] Starting replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.358-0400 m31200| 2015-07-09T13:55:25.358-0400 I COMMAND  [conn1] command local.oplog.rs command: replSetInitiate { replSetInitiate: { _id: "test-rs1", members: [ { _id: 0.0, host: "bs-osx108-8:31200" }, { _id: 1.0, host: "bs-osx108-8:31201" }, { _id: 2.0, host: "bs-osx108-8:31202" } ] } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:22 locks:{ Global: { acquireCount: { r: 5, w: 3, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 233 } }, Database: { acquireCount: { w: 1, W: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 260ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.359-0400 m31200| 2015-07-09T13:55:25.358-0400 I REPL     [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:25.360-0400 m31200| 2015-07-09T13:55:25.360-0400 I REPL     [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.117-0400 m31200| 2015-07-09T13:55:27.117-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62509 #4 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.118-0400 m31200| 2015-07-09T13:55:27.117-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62510 #5 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.118-0400 m31200| 2015-07-09T13:55:27.118-0400 I NETWORK  [conn4] end connection 127.0.0.1:62509 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.118-0400 m31200| 2015-07-09T13:55:27.118-0400 I NETWORK  [conn5] end connection 127.0.0.1:62510 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.119-0400 m31202| 2015-07-09T13:55:27.119-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62511 #4 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.119-0400 m31201| 2015-07-09T13:55:27.119-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62512 #4 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.119-0400 m31202| 2015-07-09T13:55:27.119-0400 I NETWORK  [conn4] end connection 127.0.0.1:62511 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.120-0400 m31201| 2015-07-09T13:55:27.119-0400 I NETWORK  [conn4] end connection 127.0.0.1:62512 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.126-0400 m31200| 2015-07-09T13:55:27.126-0400 I REPL     [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.127-0400 m31200| 2015-07-09T13:55:27.127-0400 I REPL     [ReplicationExecutor] not electing self, we could not contact enough voting members
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.133-0400 m31201| 2015-07-09T13:55:27.133-0400 I REPL     [replExecDBWorker-1] Starting replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.133-0400 m31201| 2015-07-09T13:55:27.133-0400 W REPL     [rsSync] did not receive a valid config yet, sleeping 5 seconds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.135-0400 m31201| 2015-07-09T13:55:27.133-0400 I REPL     [ReplicationExecutor] New replica set config in use: { _id: "test-rs1", version: 1, members: [ { _id: 0, host: "bs-osx108-8:31200", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "bs-osx108-8:31201", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "bs-osx108-8:31202", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.135-0400 m31201| 2015-07-09T13:55:27.133-0400 I REPL     [ReplicationExecutor] This node is bs-osx108-8:31201 in the config
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.135-0400 m31201| 2015-07-09T13:55:27.133-0400 I REPL     [ReplicationExecutor] transition to STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.136-0400 m31201| 2015-07-09T13:55:27.134-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31200 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.136-0400 m31202| 2015-07-09T13:55:27.135-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62513 #5 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.137-0400 m31201| 2015-07-09T13:55:27.136-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31202 is now in state STARTUP
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.137-0400 m31201| 2015-07-09T13:55:27.137-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62514 #5 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.138-0400 m31202| 2015-07-09T13:55:27.137-0400 I REPL     [replExecDBWorker-0] Starting replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.138-0400 m31202| 2015-07-09T13:55:27.138-0400 W REPL     [rsSync] did not receive a valid config yet, sleeping 5 seconds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.139-0400 m31202| 2015-07-09T13:55:27.138-0400 I REPL     [ReplicationExecutor] New replica set config in use: { _id: "test-rs1", version: 1, members: [ { _id: 0, host: "bs-osx108-8:31200", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 1, host: "bs-osx108-8:31201", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 }, { _id: 2, host: "bs-osx108-8:31202", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.139-0400 m31202| 2015-07-09T13:55:27.138-0400 I REPL     [ReplicationExecutor] This node is bs-osx108-8:31202 in the config
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.139-0400 m31202| 2015-07-09T13:55:27.138-0400 I REPL     [ReplicationExecutor] transition to STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.139-0400 m31202| 2015-07-09T13:55:27.138-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31200 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:27.139-0400 m31202| 2015-07-09T13:55:27.139-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31201 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.129-0400 m31200| 2015-07-09T13:55:29.129-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31201 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.129-0400 m31200| 2015-07-09T13:55:29.129-0400 I REPL     [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.129-0400 m31200| 2015-07-09T13:55:29.129-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31202 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.130-0400 m31200| 2015-07-09T13:55:29.130-0400 I REPL     [ReplicationExecutor] running for election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.131-0400 m31201| 2015-07-09T13:55:29.131-0400 I REPL     [ReplicationExecutor] replSetElect voting yea for bs-osx108-8:31200 (0)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.131-0400 m31202| 2015-07-09T13:55:29.131-0400 I REPL     [ReplicationExecutor] replSetElect voting yea for bs-osx108-8:31200 (0)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.131-0400 m31200| 2015-07-09T13:55:29.131-0400 I REPL     [ReplicationExecutor] received vote: 1 votes from bs-osx108-8:31201
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.132-0400 m31200| 2015-07-09T13:55:29.131-0400 I REPL     [ReplicationExecutor] election succeeded, assuming primary role
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.132-0400 m31200| 2015-07-09T13:55:29.131-0400 I REPL     [ReplicationExecutor] transition to PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.136-0400 m31201| 2015-07-09T13:55:29.136-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31200 is now in state PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.138-0400 m31201| 2015-07-09T13:55:29.137-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31202 is now in state STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.140-0400 m31202| 2015-07-09T13:55:29.139-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31200 is now in state PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:29.363-0400 m31200| 2015-07-09T13:55:29.363-0400 I REPL     [rsSync] transition to primary complete; database writes are now permitted
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.134-0400 m31201| 2015-07-09T13:55:32.133-0400 I REPL     [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.134-0400 m31201| 2015-07-09T13:55:32.134-0400 I REPL     [rsSync] creating replication oplog of size: 1024MB...
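Editor's note: the STARTUP2 -> SECONDARY -> PRIMARY transitions logged above can be observed from any client with the replSetGetStatus admin command; a minimal sketch (hostname and port taken from the log):

    // Sketch: poll member states the way the harness's await helpers do internally.
    var conn = new Mongo("bs-osx108-8:31200");
    var status = conn.getDB("admin").runCommand({replSetGetStatus: 1});
    assert.commandWorked(status);
    status.members.forEach(function(m) {
        print(m.name + " is " + m.stateStr);  // e.g. "bs-osx108-8:31200 is PRIMARY"
    });
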
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.138-0400 m31202| 2015-07-09T13:55:32.138-0400 I REPL     [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.139-0400 m31202| 2015-07-09T13:55:32.138-0400 I REPL     [rsSync] creating replication oplog of size: 1024MB...
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.144-0400 m31201| 2015-07-09T13:55:32.143-0400 I STORAGE  [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.165-0400 m31202| 2015-07-09T13:55:32.164-0400 I STORAGE  [rsSync] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.593-0400 m31202| 2015-07-09T13:55:32.593-0400 I REPL     [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.594-0400 m31202| 2015-07-09T13:55:32.593-0400 I REPL     [rsSync] initial sync pending
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.598-0400 m31201| 2015-07-09T13:55:32.597-0400 I REPL     [rsSync] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.598-0400 m31201| 2015-07-09T13:55:32.597-0400 I REPL     [rsSync] initial sync pending
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.622-0400 m31202| 2015-07-09T13:55:32.621-0400 I REPL     [ReplicationExecutor] syncing from: bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.623-0400 m31200| 2015-07-09T13:55:32.623-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62515 #6 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.624-0400 m31201| 2015-07-09T13:55:32.623-0400 I REPL     [ReplicationExecutor] syncing from: bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.625-0400 m31200| 2015-07-09T13:55:32.624-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62517 #7 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.626-0400 m31202| 2015-07-09T13:55:32.626-0400 I REPL     [rsSync] initial sync drop all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.626-0400 m31202| 2015-07-09T13:55:32.626-0400 I STORAGE  [rsSync] dropAllDatabasesExceptLocal 1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.626-0400 m31202| 2015-07-09T13:55:32.626-0400 I REPL     [rsSync] initial sync clone all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.627-0400 m31202| 2015-07-09T13:55:32.627-0400 I REPL     [rsSync] initial sync data copy, starting syncup
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.627-0400 m31202| 2015-07-09T13:55:32.627-0400 I REPL     [rsSync] oplog sync 1 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.627-0400 m31202| 2015-07-09T13:55:32.627-0400 I REPL     [rsSync] oplog sync 2 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.627-0400 m31201| 2015-07-09T13:55:32.627-0400 I REPL     [rsSync] initial sync drop all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.628-0400 m31201| 2015-07-09T13:55:32.627-0400 I STORAGE  [rsSync] dropAllDatabasesExceptLocal 1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.628-0400 m31201| 2015-07-09T13:55:32.627-0400 I REPL     [rsSync] initial sync clone all databases
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.628-0400 m31202| 2015-07-09T13:55:32.627-0400 I REPL     [rsSync] initial sync building indexes
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.628-0400 m31202| 2015-07-09T13:55:32.627-0400 I REPL     [rsSync] oplog sync 3 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.628-0400 m31201| 2015-07-09T13:55:32.628-0400 I REPL     [rsSync] initial sync data copy, starting syncup
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.629-0400 m31202| 2015-07-09T13:55:32.628-0400 I REPL     [rsSync] initial sync finishing up
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.629-0400 m31202| 2015-07-09T13:55:32.628-0400 I REPL     [rsSync] set minValid=(term: -1, timestamp: Jul  9 13:55:25:1)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.629-0400 m31201| 2015-07-09T13:55:32.628-0400 I REPL     [rsSync] oplog sync 1 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.629-0400 m31201| 2015-07-09T13:55:32.629-0400 I REPL     [rsSync] oplog sync 2 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.630-0400 m31202| 2015-07-09T13:55:32.629-0400 I REPL     [rsSync] initial sync done
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.630-0400 m31201| 2015-07-09T13:55:32.629-0400 I REPL     [rsSync] initial sync building indexes
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.630-0400 m31201| 2015-07-09T13:55:32.629-0400 I REPL     [rsSync] oplog sync 3 of 3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.631-0400 m31201| 2015-07-09T13:55:32.630-0400 I REPL     [rsSync] initial sync finishing up
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.631-0400 m31201| 2015-07-09T13:55:32.630-0400 I REPL     [rsSync] set minValid=(term: -1, timestamp: Jul  9 13:55:25:1)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.631-0400 m31200| 2015-07-09T13:55:32.631-0400 I NETWORK  [conn6] end connection 127.0.0.1:62515 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.632-0400 m31202| 2015-07-09T13:55:32.631-0400 I REPL     [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.632-0400 m31201| 2015-07-09T13:55:32.631-0400 I REPL     [rsSync] initial sync done
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.632-0400 m31202| 2015-07-09T13:55:32.632-0400 I REPL     [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.636-0400 m31201| 2015-07-09T13:55:32.636-0400 I REPL     [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.636-0400 m31200| 2015-07-09T13:55:32.636-0400 I NETWORK  [conn7] end connection 127.0.0.1:62517 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.637-0400 m31201| 2015-07-09T13:55:32.637-0400 I REPL     [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.783-0400 2015-07-09T13:55:32.783-0400 I NETWORK  [main] starting new replica set monitor for replica set test-rs0 with seeds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.784-0400 2015-07-09T13:55:32.783-0400 I NETWORK  [main] bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.784-0400 2015-07-09T13:55:32.783-0400 I NETWORK  [main] ,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.784-0400 2015-07-09T13:55:32.783-0400 I NETWORK  [main] bs-osx108-8:31101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.784-0400 2015-07-09T13:55:32.783-0400 I NETWORK  [main] ,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.784-0400 2015-07-09T13:55:32.783-0400 I NETWORK  [main] bs-osx108-8:31102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.784-0400 2015-07-09T13:55:32.784-0400 I NETWORK  [ReplicaSetMonitorWatcher] starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.786-0400 m31101| 2015-07-09T13:55:32.785-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62518 #6 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.788-0400 m31100| 2015-07-09T13:55:32.788-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62519 #8 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.811-0400 2015-07-09T13:55:32.811-0400 I NETWORK  [main] starting new replica set monitor for replica set test-rs1 with seeds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.811-0400 2015-07-09T13:55:32.811-0400 I NETWORK  [main] bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.811-0400 2015-07-09T13:55:32.811-0400 I NETWORK  [main] ,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.811-0400 2015-07-09T13:55:32.811-0400 I NETWORK  [main] bs-osx108-8:31201
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.812-0400 2015-07-09T13:55:32.811-0400 I NETWORK  [main] ,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.812-0400 2015-07-09T13:55:32.811-0400 I NETWORK  [main] bs-osx108-8:31202
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.812-0400 m31201| 2015-07-09T13:55:32.812-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62520 #6 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.814-0400 m31200| 2015-07-09T13:55:32.814-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62521 #8 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.816-0400 ReplSetTest Starting Set
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.816-0400 ReplSetTest n is : 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.816-0400 ReplSetTest n: 0 ports: [ 29000 ] 29000 number
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.816-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.816-0400 	"useHostName" : true,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.816-0400 	"oplogSize" : 40,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"keyFile" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"port" : 29000,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"noprealloc" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"smallfiles" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"rest" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"replSet" : "test-configRS",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"dbpath" : "$set-$node",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"pathOpts" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 		"testName" : "test",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 		"node" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 		"set" : "test-configRS"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	},
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"configsvr" : "",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.817-0400 	"noJournalPrealloc" : undefined,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.818-0400 	"restart" : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.818-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.818-0400 ReplSetTest Starting....
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.818-0400 Resetting db path '/data/db/job0/mongorunner/test-configRS-0'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.818-0400 2015-07-09T13:55:32.818-0400 I -        [main] shell: started program (sh2886): /data/mci/src/mongod --oplogSize 40 --port 29000 --noprealloc --smallfiles --rest --replSet test-configRS --dbpath /data/db/job0/mongorunner/test-configRS-0 --configsvr --setParameter enableTestCommands=1 --storageEngine wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.819-0400 2015-07-09T13:55:32.819-0400 W NETWORK  [main] Failed to connect to 127.0.0.1:29000, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.833-0400 m29000| 2015-07-09T13:55:32.831-0400 I CONTROL  [main] ** WARNING: --rest is specified without --httpinterface,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.833-0400 m29000| 2015-07-09T13:55:32.832-0400 I CONTROL  [main] **          enabling http interface
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.833-0400 m29000| note: noprealloc may hurt performance in many applications
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.857-0400 m29000| 2015-07-09T13:55:32.856-0400 I STORAGE  [initandlisten] wiredtiger_open config: create,cache_size=1G,session_max=20000,eviction=(threads_max=4),statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000),checkpoint=(wait=60,log_size=2GB),statistics_log=(wait=0),
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.888-0400 m31101| 2015-07-09T13:55:32.888-0400 I REPL     [ReplicationExecutor] syncing from: bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.888-0400 m31102| 2015-07-09T13:55:32.888-0400 I REPL     [ReplicationExecutor] syncing from: bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.890-0400 m31100| 2015-07-09T13:55:32.889-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62523 #9 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.890-0400 m31100| 2015-07-09T13:55:32.890-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62524 #10 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.907-0400 m29000| 2015-07-09T13:55:32.906-0400 W STORAGE  [initandlisten] Detected configuration for non-active storage engine mmapv1 when current storage engine is wiredTiger
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.907-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten] MongoDB starting : pid=2886 port=29000 dbpath=/data/db/job0/mongorunner/test-configRS-0 master=1 64-bit host=bs-osx108-8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.907-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.908-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.908-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten] **       Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.908-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.908-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.908-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.908-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.909-0400 m29000| 2015-07-09T13:55:32.907-0400 I CONTROL  [initandlisten] options: { net: { http: { RESTInterfaceEnabled: true, enabled: true }, port: 29000 }, replication: { oplogSizeMB: 40, replSet: "test-configRS" }, setParameter: { enableTestCommands: "1" }, sharding: { clusterRole: "configsvr" }, storage: { dbPath: "/data/db/job0/mongorunner/test-configRS-0", engine: "wiredTiger", mmapv1: { preallocDataFiles: false, smallFiles: true } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.909-0400 m29000| 2015-07-09T13:55:32.908-0400 I NETWORK  [websvr] admin web console waiting for connections on port 30000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.943-0400 m29000| 2015-07-09T13:55:32.942-0400 I REPL     [initandlisten] Did not find local voted for document at startup; NoMatchingDocument Did not find replica set lastVote document in local.replset.election
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.943-0400 m29000| 2015-07-09T13:55:32.943-0400 I REPL     [initandlisten] Did not find local replica set configuration document at startup; NoMatchingDocument Did not find replica set configuration document in local.system.replset
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:32.988-0400 m29000| 2015-07-09T13:55:32.987-0400 I NETWORK  [initandlisten] waiting for connections on port 29000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.021-0400 m29000| 2015-07-09T13:55:33.021-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62525 #1 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.023-0400 [ connection to bs-osx108-8:29000 ]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.023-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.024-0400 	"replSetInitiate" : {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.024-0400 		"_id" : "test-configRS",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.024-0400 		"members" : [
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.024-0400 			{
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.024-0400 				"_id" : 0,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.024-0400 				"host" : "bs-osx108-8:29000"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.024-0400 			}
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.025-0400 		]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.025-0400 	}
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.025-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.025-0400 m29000| 2015-07-09T13:55:33.024-0400 I REPL     [conn1] replSetInitiate admin command received from client
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.026-0400 m29000| 2015-07-09T13:55:33.025-0400 I REPL     [conn1] replSetInitiate config object with 1 members parses ok
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.046-0400 m29000| 2015-07-09T13:55:33.045-0400 I REPL     [ReplicationExecutor] New replica set config in use: { _id: "test-configRS", version: 1, members: [ { _id: 0, host: "bs-osx108-8:29000", arbiterOnly: false, buildIndexes: true, hidden: false, priority: 1.0, tags: {}, slaveDelay: 0, votes: 1 } ], settings: { chainingAllowed: true, heartbeatTimeoutSecs: 10, getLastErrorModes: {}, getLastErrorDefaults: { w: 1, wtimeout: 0 }, protocolVersion: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.047-0400 m29000| 2015-07-09T13:55:33.045-0400 I REPL     [ReplicationExecutor] This node is bs-osx108-8:29000 in the config
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.047-0400 m29000| 2015-07-09T13:55:33.045-0400 I REPL     [ReplicationExecutor] transition to STARTUP2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.048-0400 m29000| 2015-07-09T13:55:33.046-0400 I REPL     [conn1] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.048-0400 m29000| 2015-07-09T13:55:33.046-0400 I REPL     [conn1] creating replication oplog of size: 40MB...
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.051-0400 m29000| 2015-07-09T13:55:33.051-0400 I STORAGE  [conn1] Starting WiredTigerRecordStoreThread local.oplog.rs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.135-0400 m31200| 2015-07-09T13:55:33.134-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31201 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.135-0400 m31200| 2015-07-09T13:55:33.134-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31202 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.140-0400 m31201| 2015-07-09T13:55:33.139-0400 I REPL     [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.140-0400 m31201| 2015-07-09T13:55:33.140-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31202 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.144-0400 m31202| 2015-07-09T13:55:33.143-0400 I REPL     [ReplicationExecutor] Member bs-osx108-8:31201 is now in state SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.145-0400 m31202| 2015-07-09T13:55:33.144-0400 I REPL     [ReplicationExecutor] syncing from: bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.146-0400 m31200| 2015-07-09T13:55:33.146-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62526 #9 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.326-0400 m29000| 2015-07-09T13:55:33.325-0400 I REPL     [conn1] ******
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.327-0400 m29000| 2015-07-09T13:55:33.326-0400 I REPL     [conn1] Starting replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.327-0400 m29000| 2015-07-09T13:55:33.326-0400 I COMMAND  [conn1] command local.oplog.rs command: replSetInitiate { replSetInitiate: { _id: "test-configRS", members: [ { _id: 0.0, host: "bs-osx108-8:29000" } ] } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:22 locks:{ Global: { acquireCount: { r: 5, w: 3, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 310 } }, Database: { acquireCount: { w: 1, W: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 302ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.327-0400 m29000| 2015-07-09T13:55:33.327-0400 I REPL     [ReplicationExecutor] transition to RECOVERING
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.329-0400 m29000| 2015-07-09T13:55:33.329-0400 I REPL     [ReplicationExecutor] transition to SECONDARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.329-0400 m29000| 2015-07-09T13:55:33.329-0400 I REPL     [ReplicationExecutor] transition to PRIMARY
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.330-0400 m31102| 2015-07-09T13:55:33.330-0400 I REPL     [SyncSourceFeedback] setting syncSourceFeedback to bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.331-0400 m31101| 2015-07-09T13:55:33.330-0400 I REPL     [SyncSourceFeedback] setting syncSourceFeedback to bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.331-0400 m31100| 2015-07-09T13:55:33.331-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62527 #11 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.332-0400 m31100| 2015-07-09T13:55:33.331-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62528 #12 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.622-0400 m31202| 2015-07-09T13:55:33.622-0400 I REPL     [SyncSourceFeedback] setting syncSourceFeedback to bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:33.624-0400 m31200| 2015-07-09T13:55:33.624-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62529 #10 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.142-0400 m31201| 2015-07-09T13:55:34.141-0400 I REPL     [ReplicationExecutor] syncing from: bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.143-0400 m31200| 2015-07-09T13:55:34.143-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62530 #11 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.328-0400 m29000| 2015-07-09T13:55:34.328-0400 I REPL     [rsSync] transition to primary complete; database writes are now permitted
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.343-0400 "config servers: test-configRS/bs-osx108-8:29000"
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.344-0400 2015-07-09T13:55:34.343-0400 I NETWORK  [main] starting new replica set monitor for replica set test-configRS with seeds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.344-0400 2015-07-09T13:55:34.343-0400 I NETWORK  [main] bs-osx108-8:29000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.345-0400 m29000| 2015-07-09T13:55:34.344-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62531 #2 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.346-0400 ShardingTest test :
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.346-0400 {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.346-0400 	"config" : "test-configRS/bs-osx108-8:29000",
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.347-0400 	"shards" : [
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.347-0400 		connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102,
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.347-0400 		connection to test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.347-0400 	]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.347-0400 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.348-0400 2015-07-09T13:55:34.348-0400 I -        [main] shell: started program (sh2887): /data/mci/src/mongos --port 30999 --configdb test-configRS/bs-osx108-8:29000 --chunkSize 50 --setParameter enableTestCommands=1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.349-0400 2015-07-09T13:55:34.349-0400 W NETWORK  [main] Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.365-0400 m30999| 2015-07-09T13:55:34.363-0400 W SHARDING [main] running with less than 3 config servers should be done only for testing purposes and is not recommended for production
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.375-0400 m30999| 2015-07-09T13:55:34.374-0400 I CONTROL  [main]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.375-0400 m30999| 2015-07-09T13:55:34.374-0400 I CONTROL  [main] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.375-0400 m30999| 2015-07-09T13:55:34.374-0400 I CONTROL  [main] **       Not recommended for production.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.375-0400 m30999| 2015-07-09T13:55:34.374-0400 I CONTROL  [main]
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.375-0400 m30999| 2015-07-09T13:55:34.375-0400 I SHARDING [mongosMain] MongoS version 3.1.6-pre- starting: pid=2887 port=30999 64-bit host=bs-osx108-8 (--help for usage)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.375-0400 m30999| 2015-07-09T13:55:34.375-0400 I CONTROL  [mongosMain] db version v3.1.6-pre-
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.375-0400 m30999| 2015-07-09T13:55:34.375-0400 I CONTROL  [mongosMain] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.376-0400 m30999| 2015-07-09T13:55:34.375-0400 I CONTROL  [mongosMain] allocator: system
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.376-0400 m30999| 2015-07-09T13:55:34.375-0400 I CONTROL  [mongosMain] options: { net: { port: 30999 }, setParameter: { enableTestCommands: "1" }, sharding: { chunkSize: 50, configDB: "test-configRS/bs-osx108-8:29000" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.378-0400 m30999| 2015-07-09T13:55:34.378-0400 I NETWORK  [mongosMain] starting new replica set monitor for replica set test-configRS with seeds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.378-0400 m30999| 2015-07-09T13:55:34.378-0400 I NETWORK  [mongosMain] bs-osx108-8:29000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.378-0400 m30999| 2015-07-09T13:55:34.378-0400 I NETWORK  [ReplicaSetMonitorWatcher] starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.379-0400 m29000| 2015-07-09T13:55:34.379-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62533 #3 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.382-0400 m29000| 2015-07-09T13:55:34.382-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62534 #4 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.385-0400 m29000| 2015-07-09T13:55:34.385-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62535 #5 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.388-0400 m29000| 2015-07-09T13:55:34.388-0400 I COMMAND  [conn5] CMD fsync: sync:1 lock:0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.435-0400 m29000| 2015-07-09T13:55:34.435-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62536 #6 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.439-0400 m30999| 2015-07-09T13:55:34.439-0400 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/bs-osx108-8:29000 and process bs-osx108-8:30999:1436464534:16807 (sleeping for 30000ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.441-0400 m29000| 2015-07-09T13:55:34.440-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62537 #7 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.464-0400 m30999| 2015-07-09T13:55:34.463-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:55:34.439-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.464-0400 m30999| 2015-07-09T13:55:34.464-0400 I SHARDING [mongosMain] distributed lock 'configUpgrade/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb596ca4787b9985d1b83
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.466-0400 m30999| 2015-07-09T13:55:34.466-0400 I SHARDING [mongosMain] starting upgrade of config server from v0 to v7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.466-0400 m30999| 2015-07-09T13:55:34.466-0400 I SHARDING [mongosMain] starting next upgrade step from v0 to v7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.475-0400 m30999| 2015-07-09T13:55:34.475-0400 I SHARDING [mongosMain] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:34.475-0400-559eb596ca4787b9985d1b84", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464534475), what: "starting upgrade of config database", ns: "config.version", details: { from: 0, to: 7 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.476-0400 m29000| 2015-07-09T13:55:34.476-0400 I NETWORK  [initandlisten] connection accepted from 127.0.0.1:62538 #8 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.528-0400 m29000| 2015-07-09T13:55:34.528-0400 I SHARDING [conn8] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.529-0400 m29000| 2015-07-09T13:55:34.529-0400 I NETWORK  [conn8] starting new replica set monitor for replica set test-configRS with seeds
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.529-0400 m29000| 2015-07-09T13:55:34.529-0400 I NETWORK  [conn8] bs-osx108-8:29000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.530-0400 m29000| 2015-07-09T13:55:34.529-0400 I NETWORK  [ReplicaSetMonitorWatcher] starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.530-0400 m30999| 2015-07-09T13:55:34.530-0400 I SHARDING [mongosMain] writing initial config version at v7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.551-0400 2015-07-09T13:55:34.550-0400 W NETWORK  [main] Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.596-0400 m30999| 2015-07-09T13:55:34.596-0400 I SHARDING [mongosMain] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:34.596-0400-559eb596ca4787b9985d1b86", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464534596), what: "finished upgrade of config database", ns: "config.version", details: { from: 0, to: 7 } }
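Editor's note: the "ShardingTest test :" summary and the mongos startup above correspond to the usual shell-side cluster bring-up. Expressed with the shell's ShardingTest helper it would look roughly like this; option names are the commonly used ones, not necessarily this suite's exact invocation:

    // Sketch: two shards, each a 3-node replica set (test-rs0 / test-rs1),
    // two mongos routers (like ports 30999 and 30998 here), one config server
    // (hence the "less than 3 config servers" warning in the log).
    var st = new ShardingTest({
        shards: 2,
        rs: { nodes: 3 },
        mongos: 2,
        config: 1
    });
    // st.s0 and st.s1 are connections to the two routers.
    // ... run the workload ...
    st.stop();
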
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.628-0400 m31201| 2015-07-09T13:55:34.628-0400 I REPL [SyncSourceFeedback] setting syncSourceFeedback to bs-osx108-8:31200 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.629-0400 m31200| 2015-07-09T13:55:34.629-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62540 #12 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.649-0400 m30999| 2015-07-09T13:55:34.649-0400 I SHARDING [mongosMain] upgrade of config server to v7 successful [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.651-0400 m29000| 2015-07-09T13:55:34.650-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62541 #9 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.652-0400 m30999| 2015-07-09T13:55:34.652-0400 I SHARDING [mongosMain] distributed lock 'configUpgrade/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.752-0400 2015-07-09T13:55:34.752-0400 W NETWORK [main] Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.780-0400 m29000| 2015-07-09T13:55:34.780-0400 I INDEX [conn8] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, min: 1 }, name: "ns_1_min_1", ns: "config.chunks" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.780-0400 m29000| 2015-07-09T13:55:34.780-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.786-0400 m29000| 2015-07-09T13:55:34.786-0400 I INDEX [conn8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.845-0400 m29000| 2015-07-09T13:55:34.843-0400 I INDEX [conn8] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, shard: 1, min: 1 }, name: "ns_1_shard_1_min_1", ns: "config.chunks" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.845-0400 m29000| 2015-07-09T13:55:34.843-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.848-0400 m29000| 2015-07-09T13:55:34.848-0400 I INDEX [conn8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.907-0400 m29000| 2015-07-09T13:55:34.906-0400 I INDEX [conn8] build index on: config.chunks properties: { v: 1, unique: true, key: { ns: 1, lastmod: 1 }, name: "ns_1_lastmod_1", ns: "config.chunks" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.907-0400 m29000| 2015-07-09T13:55:34.906-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.911-0400 m29000| 2015-07-09T13:55:34.911-0400 I INDEX [conn8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.954-0400 2015-07-09T13:55:34.954-0400 W NETWORK [main] Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.978-0400 m29000| 2015-07-09T13:55:34.978-0400 I INDEX [conn8] build index on: config.shards properties: { v: 1, unique: true, key: { host: 1 }, name: "host_1", ns: "config.shards" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.978-0400 m29000| 2015-07-09T13:55:34.978-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:34.981-0400 m29000| 2015-07-09T13:55:34.981-0400 I INDEX [conn8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.040-0400 m29000| 2015-07-09T13:55:35.039-0400 I INDEX [conn8] build index on: config.locks properties: { v: 1, key: { ts: 1 }, name: "ts_1", ns: "config.locks" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.041-0400 m29000| 2015-07-09T13:55:35.039-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.045-0400 m29000| 2015-07-09T13:55:35.044-0400 I INDEX [conn8] build index done. scanned 1 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.103-0400 m29000| 2015-07-09T13:55:35.102-0400 I INDEX [conn8] build index on: config.locks properties: { v: 1, key: { state: 1, process: 1 }, name: "state_1_process_1", ns: "config.locks" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.103-0400 m29000| 2015-07-09T13:55:35.102-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.107-0400 m29000| 2015-07-09T13:55:35.107-0400 I INDEX [conn8] build index done. scanned 1 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.157-0400 2015-07-09T13:55:35.157-0400 W NETWORK [main] Failed to connect to 127.0.0.1:30999, reason: errno:61 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.165-0400 m29000| 2015-07-09T13:55:35.165-0400 I INDEX [conn8] build index on: config.lockpings properties: { v: 1, key: { ping: 1 }, name: "ping_1", ns: "config.lockpings" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.166-0400 m29000| 2015-07-09T13:55:35.165-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.170-0400 m29000| 2015-07-09T13:55:35.170-0400 I INDEX [conn8] build index done. scanned 1 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.242-0400 m29000| 2015-07-09T13:55:35.241-0400 I INDEX [conn8] build index on: config.tags properties: { v: 1, unique: true, key: { ns: 1, min: 1 }, name: "ns_1_min_1", ns: "config.tags" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.242-0400 m29000| 2015-07-09T13:55:35.241-0400 I INDEX [conn8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.245-0400 m29000| 2015-07-09T13:55:35.245-0400 I INDEX [conn8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.246-0400 m30999| 2015-07-09T13:55:35.246-0400 I SHARDING [Balancer] about to contact config servers and shards [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.247-0400 m30999| 2015-07-09T13:55:35.247-0400 I SHARDING [Balancer] config servers and shards contacted successfully [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.247-0400 m30999| 2015-07-09T13:55:35.247-0400 I NETWORK [mongosMain] waiting for connections on port 30999 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.247-0400 m30999| 2015-07-09T13:55:35.247-0400 I SHARDING [Balancer] balancer id: bs-osx108-8:30999 started [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.316-0400 m30999| 2015-07-09T13:55:35.315-0400 I SHARDING [Balancer] distributed lock 'balancer/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb597ca4787b9985d1b88 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.360-0400 m30999| 2015-07-09T13:55:35.360-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62545 #1 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.375-0400 2015-07-09T13:55:35.374-0400 I - [main] shell: started program (sh2888): /data/mci/src/mongos --port 30998 --configdb test-configRS/bs-osx108-8:29000 --chunkSize 50 --setParameter enableTestCommands=1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.376-0400 2015-07-09T13:55:35.376-0400 W NETWORK [main] Failed to connect to 127.0.0.1:30998, reason: errno:61 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.380-0400 m30999| 2015-07-09T13:55:35.379-0400 I SHARDING [Balancer] distributed lock 'balancer/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.389-0400 m30998| 2015-07-09T13:55:35.387-0400 W SHARDING [main] running with less than 3 config servers should be done only for testing purposes and is not recommended for production [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.398-0400 m30998| 2015-07-09T13:55:35.398-0400 I CONTROL [main] [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.399-0400 m30998| 2015-07-09T13:55:35.398-0400 I CONTROL [main] ** NOTE: This is a development version (3.1.6-pre-) of MongoDB. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.399-0400 m30998| 2015-07-09T13:55:35.398-0400 I CONTROL [main] ** Not recommended for production. 
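The repeated "Failed to connect to 127.0.0.1:30998 ... Connection refused" warnings above are the shell polling the new mongos before it has bound its port; they are expected startup noise, not failures. A minimal sketch of that wait loop, assuming only the stock shell helpers assert.soon() and the Mongo() connection constructor (the real logic lives inside the MongoRunner/ShardingTest helpers):

    // Poll until the mongos on port 30998 accepts connections.
    assert.soon(function() {
        try {
            new Mongo("127.0.0.1:30998"); // throws while the port is still closed
            return true;
        } catch (e) {
            return false;                 // surfaces as "Connection refused" above
        }
    }, "mongos on port 30998 never came up", 30 * 1000);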
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.399-0400 m30998| 2015-07-09T13:55:35.398-0400 I CONTROL [main] [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.399-0400 m30998| 2015-07-09T13:55:35.398-0400 I SHARDING [mongosMain] MongoS version 3.1.6-pre- starting: pid=2888 port=30998 64-bit host=bs-osx108-8 (--help for usage) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.399-0400 m30998| 2015-07-09T13:55:35.399-0400 I CONTROL [mongosMain] db version v3.1.6-pre- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.399-0400 m30998| 2015-07-09T13:55:35.399-0400 I CONTROL [mongosMain] git version: d1cb71465274bcb5f3bc962ef2740cf985f32113 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.399-0400 m30998| 2015-07-09T13:55:35.399-0400 I CONTROL [mongosMain] allocator: system [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.400-0400 m30998| 2015-07-09T13:55:35.399-0400 I CONTROL [mongosMain] options: { net: { port: 30998 }, setParameter: { enableTestCommands: "1" }, sharding: { chunkSize: 50, configDB: "test-configRS/bs-osx108-8:29000" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.402-0400 m30998| 2015-07-09T13:55:35.401-0400 I NETWORK [mongosMain] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.402-0400 m30998| 2015-07-09T13:55:35.402-0400 I NETWORK [mongosMain] bs-osx108-8:29000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.402-0400 m30998| 2015-07-09T13:55:35.402-0400 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.403-0400 m29000| 2015-07-09T13:55:35.403-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62547 #10 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.406-0400 m29000| 2015-07-09T13:55:35.405-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62548 #11 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.408-0400 m29000| 2015-07-09T13:55:35.408-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62549 #12 (12 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.413-0400 m29000| 2015-07-09T13:55:35.413-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62550 #13 (13 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.579-0400 2015-07-09T13:55:35.579-0400 W NETWORK [main] Failed to connect to 127.0.0.1:30998, reason: errno:61 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.781-0400 2015-07-09T13:55:35.781-0400 W NETWORK [main] Failed to connect to 127.0.0.1:30998, reason: errno:61 Connection refused [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.810-0400 m30998| 2015-07-09T13:55:35.809-0400 I SHARDING [Balancer] about to contact config servers and shards [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.811-0400 m29000| 2015-07-09T13:55:35.811-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62554 #14 (14 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.813-0400 m30998| 2015-07-09T13:55:35.812-0400 I SHARDING [Balancer] config servers and shards contacted successfully [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.813-0400 m30998| 2015-07-09T13:55:35.812-0400 I NETWORK [mongosMain] waiting for connections on port 30998 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.813-0400 
m30998| 2015-07-09T13:55:35.812-0400 I SHARDING [Balancer] balancer id: bs-osx108-8:30998 started [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.869-0400 m29000| 2015-07-09T13:55:35.869-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62555 #15 (15 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.874-0400 m30998| 2015-07-09T13:55:35.873-0400 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/bs-osx108-8:29000 and process bs-osx108-8:30998:1436464535:16807 (sleeping for 30000ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.877-0400 m30998| 2015-07-09T13:55:35.876-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:55:35.874-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.877-0400 m30998| 2015-07-09T13:55:35.877-0400 I SHARDING [Balancer] distributed lock 'balancer/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eb5970bd550bed3408aa7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.933-0400 m29000| 2015-07-09T13:55:35.933-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62556 #16 (16 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.935-0400 m30998| 2015-07-09T13:55:35.935-0400 I SHARDING [Balancer] distributed lock 'balancer/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:35.983-0400 m30998| 2015-07-09T13:55:35.983-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62557 #1 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.046-0400 m29000| 2015-07-09T13:55:36.046-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62558 #17 (17 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.049-0400 Waiting for active hosts... [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.049-0400 Waiting for the balancer lock... [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.051-0400 Waiting again for active hosts after balancer is off... 
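With both routers (30999 and 30998) accepting connections and the balancer lock exercised by each, cluster bring-up is complete. The suite's literal source is not shown in this log, so the following is only an inferred reconstruction of the kind of ShardingTest call that yields this topology, with option values read off the ports and flags above:

    // Inferred sketch, not the suite's actual code: two mongos routers,
    // two 3-node replica-set shards (test-rs0/test-rs1), 50MB chunks.
    var st = new ShardingTest({
        shards: 2,
        mongos: 2,
        rs: { nodes: 3 },
        other: { chunkSize: 50 }          // matches --chunkSize 50 above
    });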
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.051-0400 ShardingTest undefined going to add shard : test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.052-0400 m30999| 2015-07-09T13:55:36.052-0400 I NETWORK [conn1] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.052-0400 m30999| 2015-07-09T13:55:36.052-0400 I NETWORK [conn1] bs-osx108-8:31100 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.052-0400 m30999| 2015-07-09T13:55:36.052-0400 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.053-0400 m30999| 2015-07-09T13:55:36.052-0400 I NETWORK [conn1] bs-osx108-8:31101 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.053-0400 m30999| 2015-07-09T13:55:36.052-0400 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.053-0400 m30999| 2015-07-09T13:55:36.052-0400 I NETWORK [conn1] bs-osx108-8:31102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.053-0400 m31102| 2015-07-09T13:55:36.053-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62559 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.056-0400 m31100| 2015-07-09T13:55:36.055-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62560 #13 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.059-0400 m31100| 2015-07-09T13:55:36.058-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62561 #14 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.063-0400 m30999| 2015-07-09T13:55:36.062-0400 I SHARDING [conn1] going to add shard: { _id: "test-rs0", host: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.117-0400 m30999| 2015-07-09T13:55:36.116-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:36.116-0400-559eb598ca4787b9985d1b8a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464536116), what: "addShard", ns: "", details: { name: "test-rs0", host: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.170-0400 { "shardAdded" : "test-rs0", "ok" : 1 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.294-0400 ShardingTest undefined going to add shard : test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.294-0400 m30999| 2015-07-09T13:55:36.170-0400 I NETWORK [conn1] starting new replica set monitor for replica set test-rs1 with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m30999| 2015-07-09T13:55:36.171-0400 I NETWORK [conn1] bs-osx108-8:31200 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m30999| 2015-07-09T13:55:36.171-0400 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m30999| 2015-07-09T13:55:36.171-0400 I NETWORK [conn1] bs-osx108-8:31201 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m30999| 2015-07-09T13:55:36.171-0400 I NETWORK [conn1] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m30999| 2015-07-09T13:55:36.171-0400 I NETWORK [conn1] bs-osx108-8:31202 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m31202| 2015-07-09T13:55:36.172-0400 
I NETWORK [initandlisten] connection accepted from 127.0.0.1:62562 #6 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m31200| 2015-07-09T13:55:36.174-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62563 #13 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.295-0400 m31200| 2015-07-09T13:55:36.176-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62564 #14 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.296-0400 m30999| 2015-07-09T13:55:36.180-0400 I SHARDING [conn1] going to add shard: { _id: "test-rs1", host: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.296-0400 m30999| 2015-07-09T13:55:36.234-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:36.234-0400-559eb598ca4787b9985d1b8b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464536234), what: "addShard", ns: "", details: { name: "test-rs1", host: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.296-0400 { "shardAdded" : "test-rs1", "ok" : 1 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.345-0400 Waiting for active hosts... [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.345-0400 Waiting for the balancer lock... [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.347-0400 Waiting again for active hosts after balancer is off... [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.365-0400 setting random seed: 1436464536365 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.366-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.366-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.366-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.366-0400 jstests/concurrency/fsm_workloads/update_replace_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.366-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.366-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.367-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.375-0400 m30999| 2015-07-09T13:55:36.375-0400 I SHARDING [conn1] distributed lock 'db0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb598ca4787b9985d1b8c [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.378-0400 m30999| 2015-07-09T13:55:36.378-0400 I SHARDING [conn1] Placing [db0] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.379-0400 m30999| 2015-07-09T13:55:36.378-0400 I SHARDING [conn1] Enabling sharding for database [db0] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.444-0400 m30999| 2015-07-09T13:55:36.443-0400 I SHARDING [conn1] distributed lock 'db0/bs-osx108-8:30999:1436464534:16807' unlocked. 
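The two addShard round-trips and the "Enabling sharding for database [db0]" step above boil down to plain admin commands against a mongos; roughly (seed lists copied from the log):

    // Run against a mongos, e.g. the one on port 30999.
    var admin = db.getSiblingDB("admin");
    assert.commandWorked(admin.runCommand({
        addShard: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102"
    }));
    assert.commandWorked(admin.runCommand({
        addShard: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202"
    }));
    assert.commandWorked(admin.runCommand({ enableSharding: "db0" }));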
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.447-0400 m31100| 2015-07-09T13:55:36.446-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62566 #15 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.450-0400 m31100| 2015-07-09T13:55:36.450-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62567 #16 (12 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.468-0400 m31100| 2015-07-09T13:55:36.467-0400 I INDEX [conn16] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.469-0400 m31100| 2015-07-09T13:55:36.467-0400 I INDEX [conn16] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.479-0400 m31100| 2015-07-09T13:55:36.478-0400 I INDEX [conn16] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.480-0400 m30999| 2015-07-09T13:55:36.480-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db0.coll0", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.483-0400 m30999| 2015-07-09T13:55:36.483-0400 I SHARDING [conn1] distributed lock 'db0.coll0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb598ca4787b9985d1b8d [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.484-0400 m30999| 2015-07-09T13:55:36.484-0400 I SHARDING [conn1] enable sharding on: db0.coll0 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.484-0400 m30999| 2015-07-09T13:55:36.484-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:36.484-0400-559eb598ca4787b9985d1b8e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464536484), what: "shardCollection.start", ns: "db0.coll0", details: { shardKey: { _id: "hashed" }, collection: "db0.coll0", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.488-0400 m31102| 2015-07-09T13:55:36.488-0400 I INDEX [repl writer worker 7] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.488-0400 m31102| 2015-07-09T13:55:36.488-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.491-0400 m31101| 2015-07-09T13:55:36.489-0400 I INDEX [repl writer worker 3] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.491-0400 m31101| 2015-07-09T13:55:36.489-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.495-0400 m31102| 2015-07-09T13:55:36.494-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.497-0400 m31101| 2015-07-09T13:55:36.497-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.538-0400 m30999| 2015-07-09T13:55:36.537-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db0.coll0 using new epoch 559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.645-0400 m30999| 2015-07-09T13:55:36.645-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 1ms sequenceNumber: 2 version: 1|1||559eb598ca4787b9985d1b8f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.711-0400 m30999| 2015-07-09T13:55:36.711-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 3 version: 1|1||559eb598ca4787b9985d1b8f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.712-0400 m31100| 2015-07-09T13:55:36.712-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62568 #17 (13 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.715-0400 m31100| 2015-07-09T13:55:36.714-0400 I SHARDING [conn17] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.715-0400 m31100| 2015-07-09T13:55:36.715-0400 I NETWORK [conn17] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.715-0400 m31100| 2015-07-09T13:55:36.715-0400 I NETWORK [conn17] bs-osx108-8:29000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.716-0400 m31100| 2015-07-09T13:55:36.715-0400 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.716-0400 m31100| 2015-07-09T13:55:36.715-0400 I SHARDING [conn17] remote client 127.0.0.1:62568 initialized this host (test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102) as shard test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.716-0400 m31100| 2015-07-09T13:55:36.715-0400 I SHARDING [conn17] remotely refreshing metadata for db0.coll0 with requested shard version 1|1||559eb598ca4787b9985d1b8f, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.717-0400 m29000| 2015-07-09T13:55:36.716-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62569 #18 (18 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.719-0400 m29000| 2015-07-09T13:55:36.719-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62570 #19 (19 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.721-0400 m31100| 2015-07-09T13:55:36.721-0400 I SHARDING [conn17] collection db0.coll0 was previously unsharded, new metadata loaded with shard version 1|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.722-0400 m31100| 2015-07-09T13:55:36.721-0400 I SHARDING [conn17] collection version was loaded at version 1|1||559eb598ca4787b9985d1b8f, took 5ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.722-0400 m30999| 2015-07-09T13:55:36.721-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:36.721-0400-559eb598ca4787b9985d1b90", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464536721), what: "shardCollection", ns: "db0.coll0", details: { version: "1|1||559eb598ca4787b9985d1b8f" } } [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:36.776-0400 m30999| 2015-07-09T13:55:36.776-0400 I SHARDING [conn1] distributed lock 'db0.coll0/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.777-0400 m30999| 2015-07-09T13:55:36.777-0400 I SHARDING [conn1] moving chunk ns: db0.coll0 moving ( ns: db0.coll0, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.778-0400 m31100| 2015-07-09T13:55:36.777-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] bs-osx108-8:31100 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] bs-osx108-8:31101 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] bs-osx108-8:31102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] starting new replica set monitor for replica set test-rs1 with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.779-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] bs-osx108-8:31200 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.780-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.780-0400 m31100| 2015-07-09T13:55:36.778-0400 I NETWORK [conn15] bs-osx108-8:31201 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.780-0400 m31100| 2015-07-09T13:55:36.779-0400 I NETWORK [conn15] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.780-0400 m31100| 2015-07-09T13:55:36.779-0400 I NETWORK [conn15] bs-osx108-8:31202 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.780-0400 m31100| 2015-07-09T13:55:36.779-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb598ca4787b9985d1b8f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.781-0400 m29000| 2015-07-09T13:55:36.780-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62571 #20 (20 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.784-0400 m31100| 2015-07-09T13:55:36.784-0400 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/bs-osx108-8:29000 and process bs-osx108-8:31100:1436464536:197041335 (sleeping for 30000ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.785-0400 m29000| 2015-07-09T13:55:36.785-0400 I NETWORK [initandlisten] connection 
accepted from 127.0.0.1:62572 #21 (21 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.787-0400 m31100| 2015-07-09T13:55:36.787-0400 I SHARDING [conn15] distributed lock 'db0.coll0/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb598792e00bb672748cd [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.788-0400 m31100| 2015-07-09T13:55:36.788-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:36.788-0400-559eb598792e00bb672748ce", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464536788), what: "moveChunk.start", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.788-0400 m31100| 2015-07-09T13:55:36.788-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:55:36.784-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.789-0400 m29000| 2015-07-09T13:55:36.789-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62573 #22 (22 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.844-0400 m31100| 2015-07-09T13:55:36.843-0400 I SHARDING [conn15] remotely refreshing metadata for db0.coll0 based on current shard version 1|1||559eb598ca4787b9985d1b8f, current metadata version is 1|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.845-0400 m31100| 2015-07-09T13:55:36.845-0400 I SHARDING [conn15] metadata of collection db0.coll0 already up to date (shard version : 1|1||559eb598ca4787b9985d1b8f, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.845-0400 m31100| 2015-07-09T13:55:36.845-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.846-0400 m31100| 2015-07-09T13:55:36.845-0400 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.847-0400 m31202| 2015-07-09T13:55:36.846-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62574 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.849-0400 m31200| 2015-07-09T13:55:36.849-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62575 #15 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.852-0400 m31200| 2015-07-09T13:55:36.852-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62576 #16 (12 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.853-0400 m31200| 2015-07-09T13:55:36.853-0400 I SHARDING [conn16] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.854-0400 m31200| 2015-07-09T13:55:36.854-0400 I NETWORK [conn16] starting new replica set monitor for replica set test-configRS with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.854-0400 m31200| 2015-07-09T13:55:36.854-0400 I NETWORK [conn16] bs-osx108-8:29000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.855-0400 m31200| 2015-07-09T13:55:36.854-0400 I NETWORK [ReplicaSetMonitorWatcher] starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.855-0400 m31200| 
2015-07-09T13:55:36.854-0400 I SHARDING [conn16] remote client 127.0.0.1:62576 initialized this host as shard test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.855-0400 m31200| 2015-07-09T13:55:36.854-0400 I SHARDING [conn16] remotely refreshing metadata for db0.coll0, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.856-0400 m29000| 2015-07-09T13:55:36.855-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62577 #23 (23 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.859-0400 m29000| 2015-07-09T13:55:36.858-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62578 #24 (24 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.861-0400 m31200| 2015-07-09T13:55:36.860-0400 I SHARDING [conn16] collection db0.coll0 was previously unsharded, new metadata loaded with shard version 0|0||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.861-0400 m31200| 2015-07-09T13:55:36.860-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb598ca4787b9985d1b8f, took 5ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.861-0400 m31200| 2015-07-09T13:55:36.861-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db0.coll0 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.861-0400 m31200| 2015-07-09T13:55:36.861-0400 I NETWORK [migrateThread] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.861-0400 m31200| 2015-07-09T13:55:36.861-0400 I NETWORK [migrateThread] bs-osx108-8:31100 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.861-0400 m31200| 2015-07-09T13:55:36.861-0400 I NETWORK [migrateThread] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.862-0400 m31200| 2015-07-09T13:55:36.861-0400 I NETWORK [migrateThread] bs-osx108-8:31101 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.862-0400 m31200| 2015-07-09T13:55:36.861-0400 I NETWORK [migrateThread] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.862-0400 m31200| 2015-07-09T13:55:36.861-0400 I NETWORK [migrateThread] bs-osx108-8:31102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.862-0400 m31102| 2015-07-09T13:55:36.862-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62579 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.863-0400 m31100| 2015-07-09T13:55:36.862-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.865-0400 m31100| 2015-07-09T13:55:36.865-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62580 #18 (14 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.866-0400 m31100| 2015-07-09T13:55:36.866-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: 
"test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.867-0400 m31100| 2015-07-09T13:55:36.867-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62581 #19 (15 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.872-0400 m31100| 2015-07-09T13:55:36.871-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.883-0400 m31100| 2015-07-09T13:55:36.880-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.883-0400 m31200| 2015-07-09T13:55:36.881-0400 I INDEX [migrateThread] build index on: db0.coll0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.883-0400 m31200| 2015-07-09T13:55:36.881-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.886-0400 m31200| 2015-07-09T13:55:36.885-0400 I INDEX [migrateThread] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.886-0400 m31200| 2015-07-09T13:55:36.885-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.896-0400 m31200| 2015-07-09T13:55:36.895-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.896-0400 m31200| 2015-07-09T13:55:36.896-0400 I SHARDING [migrateThread] Deleter starting delete for: db0.coll0 from { _id: 0 } -> { _id: MaxKey }, with opId: 141 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.896-0400 m31200| 2015-07-09T13:55:36.896-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db0.coll0 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.898-0400 m31100| 2015-07-09T13:55:36.897-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.904-0400 m31202| 2015-07-09T13:55:36.904-0400 I INDEX [repl writer worker 2] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.905-0400 m31202| 2015-07-09T13:55:36.904-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.906-0400 m31201| 2015-07-09T13:55:36.904-0400 I INDEX [repl writer worker 4] build index on: db0.coll0 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.906-0400 m31201| 2015-07-09T13:55:36.904-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.911-0400 m31201| 2015-07-09T13:55:36.911-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.913-0400 m31202| 2015-07-09T13:55:36.913-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.915-0400 m31200| 2015-07-09T13:55:36.914-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.915-0400 m31200| 2015-07-09T13:55:36.914-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db0.coll0' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.931-0400 m31100| 2015-07-09T13:55:36.931-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.932-0400 m31100| 2015-07-09T13:55:36.931-0400 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.932-0400 m31100| 2015-07-09T13:55:36.932-0400 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.932-0400 m31100| 2015-07-09T13:55:36.932-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.933-0400 m31200| 2015-07-09T13:55:36.933-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62582 #17 (13 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.939-0400 m31200| 2015-07-09T13:55:36.938-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db0.coll0' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.940-0400 m31200| 2015-07-09T13:55:36.939-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:36.939-0400-559eb598d5a107a5b9c0da83", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464536939), what: "moveChunk.to", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 34, step 2 of 5: 17, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.941-0400 m29000| 2015-07-09T13:55:36.940-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62583 #25 (25 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.995-0400 m31100| 2015-07-09T13:55:36.994-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.995-0400 m31100| 2015-07-09T13:55:36.995-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb598ca4787b9985d1b8f through { _id: MinKey } -> { _id: 0 } for collection 'db0.coll0' [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:36.996-0400 m31100| 2015-07-09T13:55:36.996-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:36.996-0400-559eb598792e00bb672748cf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464536996), what: 
"moveChunk.commit", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.049-0400 m31100| 2015-07-09T13:55:37.049-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.050-0400 m31100| 2015-07-09T13:55:37.049-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.050-0400 m31100| 2015-07-09T13:55:37.050-0400 I SHARDING [conn15] Deleter starting delete for: db0.coll0 from { _id: 0 } -> { _id: MaxKey }, with opId: 173 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.050-0400 m31100| 2015-07-09T13:55:37.050-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db0.coll0 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.050-0400 m31100| 2015-07-09T13:55:37.050-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.051-0400 m29000| 2015-07-09T13:55:37.051-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62584 #26 (26 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.053-0400 m31100| 2015-07-09T13:55:37.053-0400 I SHARDING [conn15] distributed lock 'db0.coll0/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.054-0400 m31100| 2015-07-09T13:55:37.053-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:37.053-0400-559eb599792e00bb672748d0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464537053), what: "moveChunk.from", ns: "db0.coll0", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 66, step 3 of 6: 16, step 4 of 6: 70, step 5 of 6: 118, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.108-0400 m31100| 2015-07-09T13:55:37.107-0400 I COMMAND [conn15] command db0.coll0 command: moveChunk { moveChunk: "db0.coll0", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb598ca4787b9985d1b8f') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 329ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.109-0400 m30999| 2015-07-09T13:55:37.108-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 4 version: 2|1||559eb598ca4787b9985d1b8f based on: 1|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.110-0400 m31100| 2015-07-09T13:55:37.110-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db0.coll0", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: 
-4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb598ca4787b9985d1b8f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.114-0400 m31100| 2015-07-09T13:55:37.114-0400 I SHARDING [conn15] distributed lock 'db0.coll0/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb599792e00bb672748d1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.114-0400 m31100| 2015-07-09T13:55:37.114-0400 I SHARDING [conn15] remotely refreshing metadata for db0.coll0 based on current shard version 2|0||559eb598ca4787b9985d1b8f, current metadata version is 2|0||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.116-0400 m31100| 2015-07-09T13:55:37.115-0400 I SHARDING [conn15] updating metadata for db0.coll0 from shard version 2|0||559eb598ca4787b9985d1b8f to shard version 2|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.116-0400 m31100| 2015-07-09T13:55:37.115-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb598ca4787b9985d1b8f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.116-0400 m31100| 2015-07-09T13:55:37.116-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.118-0400 m31100| 2015-07-09T13:55:37.118-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:37.118-0400-559eb599792e00bb672748d2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464537118), what: "split", ns: "db0.coll0", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb598ca4787b9985d1b8f') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb598ca4787b9985d1b8f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.172-0400 m31100| 2015-07-09T13:55:37.172-0400 I SHARDING [conn15] distributed lock 'db0.coll0/bs-osx108-8:31100:1436464536:197041335' unlocked. 
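Everything from "CMD: shardcollection" down to this splitChunk is mongos's standard bring-up for a hashed shard key: build the { _id: "hashed" } index, create two initial chunks split at { _id: 0 }, migrate the upper chunk to test-rs1 with waitForDelete, then split the lower chunk again at -4611686018427387902 (the upper chunk gets the mirror-image split next). All of that falls out of a single shardCollection call; as separate manual commands it would look roughly like:

    // Rough manual equivalent of what mongos does automatically here.
    var admin = db.getSiblingDB("admin");
    assert.commandWorked(admin.runCommand({
        shardCollection: "db0.coll0",
        key: { _id: "hashed" }            // hashed key => presplit across shards
    }));
    assert.commandWorked(admin.runCommand({
        moveChunk: "db0.coll0",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
        to: "test-rs1",
        _waitForDelete: true              // matches waitForDelete: true above
    }));
    assert.commandWorked(admin.runCommand({
        split: "db0.coll0",
        middle: { _id: NumberLong("-4611686018427387902") }
    }));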
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.174-0400 m30999| 2015-07-09T13:55:37.174-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 5 version: 2|3||559eb598ca4787b9985d1b8f based on: 2|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.175-0400 m31200| 2015-07-09T13:55:37.175-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62585 #18 (14 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.177-0400 m31200| 2015-07-09T13:55:37.176-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db0.coll0", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb598ca4787b9985d1b8f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.178-0400 m29000| 2015-07-09T13:55:37.177-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62586 #27 (27 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.183-0400 m31200| 2015-07-09T13:55:37.182-0400 I SHARDING [LockPinger] creating distributed lock ping thread for test-configRS/bs-osx108-8:29000 and process bs-osx108-8:31200:1436464537:809424560 (sleeping for 30000ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.184-0400 m29000| 2015-07-09T13:55:37.183-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62587 #28 (28 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.185-0400 m31200| 2015-07-09T13:55:37.185-0400 I SHARDING [conn18] distributed lock 'db0.coll0/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb599d5a107a5b9c0da84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.186-0400 m31200| 2015-07-09T13:55:37.185-0400 I SHARDING [conn18] remotely refreshing metadata for db0.coll0 based on current shard version 0|0||559eb598ca4787b9985d1b8f, current metadata version is 1|1||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.187-0400 m31200| 2015-07-09T13:55:37.186-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:55:37.182-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.187-0400 m31200| 2015-07-09T13:55:37.187-0400 I SHARDING [conn18] updating metadata for db0.coll0 from shard version 0|0||559eb598ca4787b9985d1b8f to shard version 2|0||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.188-0400 m31200| 2015-07-09T13:55:37.187-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb598ca4787b9985d1b8f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.188-0400 m31200| 2015-07-09T13:55:37.187-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.190-0400 m31200| 2015-07-09T13:55:37.189-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:37.189-0400-559eb599d5a107a5b9c0da85", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464537189), what: "split", ns: "db0.coll0", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, 
lastmodEpoch: ObjectId('559eb598ca4787b9985d1b8f') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb598ca4787b9985d1b8f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.244-0400 m29000| 2015-07-09T13:55:37.243-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62588 #29 (29 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.245-0400 m31200| 2015-07-09T13:55:37.245-0400 I SHARDING [conn18] distributed lock 'db0.coll0/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.248-0400 m30999| 2015-07-09T13:55:37.247-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db0.coll0: 0ms sequenceNumber: 6 version: 2|5||559eb598ca4787b9985d1b8f based on: 2|3||559eb598ca4787b9985d1b8f [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.251-0400 m30999| 2015-07-09T13:55:37.250-0400 I SHARDING [conn1] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.251-0400 m30999| 2015-07-09T13:55:37.250-0400 I SHARDING [conn1] retrying command: { listIndexes: "coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.251-0400 m31100| 2015-07-09T13:55:37.251-0400 I NETWORK [conn17] end connection 127.0.0.1:62568 (14 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.251-0400 m31100| 2015-07-09T13:55:37.251-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62589 #20 (15 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.255-0400 m31200| 2015-07-09T13:55:37.255-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62590 #19 (15 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.266-0400 m31200| 2015-07-09T13:55:37.265-0400 I INDEX [conn19] build index on: db0.coll0 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.267-0400 m31200| 2015-07-09T13:55:37.265-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.268-0400 m31100| 2015-07-09T13:55:37.267-0400 I INDEX [conn20] build index on: db0.coll0 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.268-0400 m31100| 2015-07-09T13:55:37.267-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.274-0400 m31200| 2015-07-09T13:55:37.274-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.274-0400 m31100| 2015-07-09T13:55:37.274-0400 I INDEX [conn20] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.281-0400 m31102| 2015-07-09T13:55:37.280-0400 I INDEX [repl writer worker 3] build index on: db0.coll0 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.281-0400 m31102| 2015-07-09T13:55:37.280-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.286-0400 m31100| 2015-07-09T13:55:37.286-0400 I INDEX [conn20] build index on: db0.coll0 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.287-0400 m31100| 2015-07-09T13:55:37.286-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.292-0400 m31200| 2015-07-09T13:55:37.292-0400 I INDEX [conn19] build index on: db0.coll0 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.293-0400 m31200| 2015-07-09T13:55:37.292-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.293-0400 m31102| 2015-07-09T13:55:37.293-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.293-0400 m31202| 2015-07-09T13:55:37.293-0400 I INDEX [repl writer worker 4] build index on: db0.coll0 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.294-0400 m31202| 2015-07-09T13:55:37.293-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.297-0400 m31201| 2015-07-09T13:55:37.296-0400 I INDEX [repl writer worker 8] build index on: db0.coll0 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.297-0400 m31201| 2015-07-09T13:55:37.296-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.303-0400 m31101| 2015-07-09T13:55:37.303-0400 I INDEX [repl writer worker 4] build index on: db0.coll0 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.303-0400 m31101| 2015-07-09T13:55:37.303-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.304-0400 m31100| 2015-07-09T13:55:37.304-0400 I INDEX [conn20] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.305-0400 m31200| 2015-07-09T13:55:37.304-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.307-0400 m31202| 2015-07-09T13:55:37.306-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.310-0400 m31201| 2015-07-09T13:55:37.310-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.317-0400 m31200| 2015-07-09T13:55:37.316-0400 I INDEX [conn19] build index on: db0.coll0 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.317-0400 m31200| 2015-07-09T13:55:37.316-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.320-0400 m31201| 2015-07-09T13:55:37.319-0400 I INDEX [repl writer worker 9] build index on: db0.coll0 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.321-0400 m31201| 2015-07-09T13:55:37.319-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.321-0400 m31100| 2015-07-09T13:55:37.319-0400 I INDEX [conn20] build index on: db0.coll0 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.321-0400 m31100| 2015-07-09T13:55:37.319-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.321-0400 m31102| 2015-07-09T13:55:37.319-0400 I INDEX [repl writer worker 8] build index on: db0.coll0 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.321-0400 m31102| 2015-07-09T13:55:37.319-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.323-0400 m31202| 2015-07-09T13:55:37.322-0400 I INDEX [repl writer worker 6] build index on: db0.coll0 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.324-0400 m31202| 2015-07-09T13:55:37.322-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.326-0400 m31101| 2015-07-09T13:55:37.325-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.327-0400 m31102| 2015-07-09T13:55:37.327-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.327-0400 m31200| 2015-07-09T13:55:37.327-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.331-0400 m31100| 2015-07-09T13:55:37.331-0400 I INDEX [conn20] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.333-0400 m31201| 2015-07-09T13:55:37.331-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.336-0400 m31202| 2015-07-09T13:55:37.335-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.340-0400 m31101| 2015-07-09T13:55:37.340-0400 I INDEX [repl writer worker 5] build index on: db0.coll0 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.340-0400 m31101| 2015-07-09T13:55:37.340-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.345-0400 m31100| 2015-07-09T13:55:37.344-0400 I INDEX [conn20] build index on: db0.coll0 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.345-0400 m31100| 2015-07-09T13:55:37.344-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.345-0400 m31102| 2015-07-09T13:55:37.344-0400 I INDEX [repl writer worker 6] build index on: db0.coll0 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.345-0400 m31102| 2015-07-09T13:55:37.344-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.347-0400 m31202| 2015-07-09T13:55:37.346-0400 I INDEX [repl writer worker 9] build index on: db0.coll0 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.347-0400 m31202| 2015-07-09T13:55:37.346-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.348-0400 m31201| 2015-07-09T13:55:37.346-0400 I INDEX [repl writer worker 10] build index on: db0.coll0 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.348-0400 m31201| 2015-07-09T13:55:37.346-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.350-0400 m31101| 2015-07-09T13:55:37.350-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.351-0400 m31200| 2015-07-09T13:55:37.350-0400 I INDEX [conn19] build index on: db0.coll0 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.351-0400 m31200| 2015-07-09T13:55:37.350-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.355-0400 m31100| 2015-07-09T13:55:37.355-0400 I INDEX [conn20] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.356-0400 m31201| 2015-07-09T13:55:37.355-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.357-0400 m31202| 2015-07-09T13:55:37.357-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.357-0400 m31102| 2015-07-09T13:55:37.355-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.362-0400 m31200| 2015-07-09T13:55:37.361-0400 I INDEX [conn19] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.363-0400 m31101| 2015-07-09T13:55:37.362-0400 I INDEX [repl writer worker 6] build index on: db0.coll0 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.363-0400 m31101| 2015-07-09T13:55:37.362-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.365-0400 m31200| 2015-07-09T13:55:37.365-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62591 #20 (16 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.367-0400 m31102| 2015-07-09T13:55:37.366-0400 I INDEX [repl writer worker 9] build index on: db0.coll0 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.367-0400 m31102| 2015-07-09T13:55:37.366-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.377-0400 m31101| 2015-07-09T13:55:37.376-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.378-0400 m31201| 2015-07-09T13:55:37.376-0400 I INDEX [repl writer worker 7] build index on: db0.coll0 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.379-0400 m31201| 2015-07-09T13:55:37.376-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.379-0400 m31102| 2015-07-09T13:55:37.377-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.382-0400 m31202| 2015-07-09T13:55:37.381-0400 I INDEX [repl writer worker 12] build index on: db0.coll0 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.383-0400 m31202| 2015-07-09T13:55:37.381-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.389-0400 m31100| 2015-07-09T13:55:37.386-0400 I COMMAND [conn15] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.392-0400 m31200| 2015-07-09T13:55:37.386-0400 I COMMAND [conn18] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.392-0400 m31101| 2015-07-09T13:55:37.386-0400 I INDEX [repl writer worker 8] build index on: db0.coll0 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db0.coll0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.392-0400 m31101| 2015-07-09T13:55:37.386-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.393-0400 m31102| 2015-07-09T13:55:37.388-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.393-0400 m31201| 2015-07-09T13:55:37.389-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.394-0400 m31202| 2015-07-09T13:55:37.389-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.394-0400 m31100| 2015-07-09T13:55:37.390-0400 I COMMAND [conn15] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.395-0400 m31200| 2015-07-09T13:55:37.390-0400 I COMMAND [conn18] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.396-0400 m31200| 2015-07-09T13:55:37.392-0400 I COMMAND [conn18] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.396-0400 m31102| 2015-07-09T13:55:37.392-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.396-0400 m31100| 2015-07-09T13:55:37.393-0400 I COMMAND [conn15] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.396-0400 m31201| 2015-07-09T13:55:37.394-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.397-0400 m31101| 2015-07-09T13:55:37.395-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.397-0400 m31100| 2015-07-09T13:55:37.395-0400 I COMMAND [conn15] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.398-0400 m31200| 2015-07-09T13:55:37.395-0400 I COMMAND [conn18] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.398-0400 m31101| 2015-07-09T13:55:37.395-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.398-0400 m31102| 2015-07-09T13:55:37.396-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.398-0400 m31201| 2015-07-09T13:55:37.397-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.398-0400 m31101| 2015-07-09T13:55:37.397-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.398-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.398-0400 m31201| 2015-07-09T13:55:37.398-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.400-0400 m31101| 2015-07-09T13:55:37.398-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.402-0400 m31101| 2015-07-09T13:55:37.400-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.403-0400 m31202| 2015-07-09T13:55:37.402-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.403-0400 m31201| 2015-07-09T13:55:37.401-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.442-0400 m31102| 2015-07-09T13:55:37.402-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.442-0400 m31202| 2015-07-09T13:55:37.414-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.449-0400 m31202| 2015-07-09T13:55:37.442-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 
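
The run of index builds and dropIndexes commands above is the index churn of the db0.coll0 workload: every build executes on the shard primaries (m31100, m31200) and is replayed on the secondaries (m31101/m31102, m31201/m31202) by their repl writer workers before a later state drops the indexes again. A minimal sketch of shell operations that would produce this pattern follows; the field names a, b, x and y come from the log, while the variable names and ordering are illustrative, not the actual fsm_workloads source.

    // Illustrative sketch: build the four secondary indexes seen above,
    // then drop them again, as the workload states do.
    var coll = db.getSiblingDB('db0').coll0;
    ['a', 'b', 'x', 'y'].forEach(function(field) {
        var spec = {};
        spec[field] = 1;
        coll.ensureIndex(spec);   // primary logs "build index on: db0.coll0"
    });
    ['a_1', 'b_1', 'x_1', 'y_1'].forEach(function(name) {
        coll.dropIndex(name);     // primary logs "CMD: dropIndexes db0.coll0"
    });
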
2015-07-09T13:55:37.449-0400 m31202| 2015-07-09T13:55:37.443-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db0.coll0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.478-0400 m30999| 2015-07-09T13:55:37.477-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62592 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.509-0400 m30998| 2015-07-09T13:55:37.508-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62593 #2 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.509-0400 m30999| 2015-07-09T13:55:37.508-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62594 #3 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.509-0400 m30998| 2015-07-09T13:55:37.509-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62595 #3 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.514-0400 m30999| 2015-07-09T13:55:37.514-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62596 #4 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.514-0400 m30999| 2015-07-09T13:55:37.514-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62597 #5 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.515-0400 m30999| 2015-07-09T13:55:37.515-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62598 #6 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.520-0400 m30998| 2015-07-09T13:55:37.519-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62599 #4 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.528-0400 m30998| 2015-07-09T13:55:37.527-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62600 #5 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.533-0400 m30998| 2015-07-09T13:55:37.533-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62601 #6 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.540-0400 setting random seed: 8329072087071 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.540-0400 setting random seed: 2830154271796 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.540-0400 setting random seed: 2465469604358 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.541-0400 setting random seed: 7147659738548 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.541-0400 setting random seed: 6714632599614 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.541-0400 setting random seed: 3772733327932 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.543-0400 setting random seed: 5337286409921 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.546-0400 setting random seed: 7870081835426 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.549-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] starting new replica set monitor for replica set test-rs0 with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.550-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] bs-osx108-8:31100 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.550-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.551-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] bs-osx108-8:31101 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:37.551-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.551-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] bs-osx108-8:31102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.551-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] starting new replica set monitor for replica set test-rs1 with seeds [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.551-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] bs-osx108-8:31200 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.551-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.552-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] bs-osx108-8:31201 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.552-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] , [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.552-0400 m30998| 2015-07-09T13:55:37.549-0400 I NETWORK [conn2] bs-osx108-8:31202 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.552-0400 m30998| 2015-07-09T13:55:37.549-0400 I SHARDING [conn2] ChunkManager: time to load chunks for db0.coll0: 1ms sequenceNumber: 2 version: 2|5||559eb598ca4787b9985d1b8f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.552-0400 setting random seed: 3006358663551 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.552-0400 m31101| 2015-07-09T13:55:37.550-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62602 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.558-0400 setting random seed: 6402868228033 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.558-0400 m31102| 2015-07-09T13:55:37.557-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62603 #8 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.561-0400 m31100| 2015-07-09T13:55:37.557-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62604 #21 (16 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.561-0400 m31201| 2015-07-09T13:55:37.561-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62608 #7 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.561-0400 m31202| 2015-07-09T13:55:37.561-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62607 #8 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.561-0400 m31200| 2015-07-09T13:55:37.561-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62609 #21 (17 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.563-0400 m31100| 2015-07-09T13:55:37.562-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62605 #22 (17 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.566-0400 m31100| 2015-07-09T13:55:37.564-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62606 #23 (18 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.566-0400 m31200| 2015-07-09T13:55:37.565-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62610 #22 (18 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.566-0400 m31200| 2015-07-09T13:55:37.565-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62611 #23 (19 connections now open) 
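
The "starting new replica set monitor" lines show mongos (m30998) lazily creating ReplicaSetMonitors for test-rs0 and test-rs1 the first time one of the ten worker connections routes an operation to them, after which the monitors and connection pools fan out to every member, producing the burst of "connection accepted" entries. Roughly the same monitor startup can be reproduced from the shell by connecting with a replica-set host string (host names copied from the log; the variable name is illustrative):

    // "setName/seed1,seed2,..." makes the client start a ReplicaSetMonitor,
    // just as conn2 triggers one inside mongos above.
    var rs0 = new Mongo('test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102');
    rs0.getDB('admin').runCommand({ismaster: 1});   // routed to the monitored primary
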
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.567-0400 m31100| 2015-07-09T13:55:37.566-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62612 #24 (19 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.567-0400 m31200| 2015-07-09T13:55:37.567-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62616 #24 (20 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.568-0400 m31100| 2015-07-09T13:55:37.568-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62613 #25 (20 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.572-0400 m31200| 2015-07-09T13:55:37.571-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62617 #25 (21 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.573-0400 m31100| 2015-07-09T13:55:37.573-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62614 #26 (21 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.578-0400 m31100| 2015-07-09T13:55:37.577-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62615 #27 (22 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.588-0400 m30999| 2015-07-09T13:55:37.587-0400 I NETWORK [conn3] end connection 127.0.0.1:62594 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.592-0400 m30999| 2015-07-09T13:55:37.592-0400 I NETWORK [conn5] end connection 127.0.0.1:62597 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.593-0400 m31200| 2015-07-09T13:55:37.593-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62618 #26 (22 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.604-0400 m31200| 2015-07-09T13:55:37.603-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62619 #27 (23 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.620-0400 m30999| 2015-07-09T13:55:37.620-0400 I NETWORK [conn6] end connection 127.0.0.1:62598 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.624-0400 m30999| 2015-07-09T13:55:37.623-0400 I NETWORK [conn2] end connection 127.0.0.1:62592 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.629-0400 m30998| 2015-07-09T13:55:37.628-0400 I NETWORK [conn4] end connection 127.0.0.1:62599 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.644-0400 m30998| 2015-07-09T13:55:37.644-0400 I NETWORK [conn3] end connection 127.0.0.1:62595 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.646-0400 m30998| 2015-07-09T13:55:37.646-0400 I NETWORK [conn2] end connection 127.0.0.1:62593 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.656-0400 m30998| 2015-07-09T13:55:37.649-0400 I NETWORK [conn5] end connection 127.0.0.1:62600 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.656-0400 m30999| 2015-07-09T13:55:37.653-0400 I NETWORK [conn4] end connection 127.0.0.1:62596 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.685-0400 m30998| 2015-07-09T13:55:37.684-0400 I NETWORK [conn6] end connection 127.0.0.1:62601 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.703-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.703-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.704-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.704-0400 jstests/concurrency/fsm_workloads/update_replace_noindex.js: Workload completed in 307 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.704-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.704-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.704-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.704-0400 m30999| 2015-07-09T13:55:37.704-0400 I COMMAND [conn1] DROP: db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.704-0400 m30999| 2015-07-09T13:55:37.704-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:37.704-0400-559eb599ca4787b9985d1b91", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464537704), what: "dropCollection.start", ns: "db0.coll0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.761-0400 m30999| 2015-07-09T13:55:37.761-0400 I SHARDING [conn1] distributed lock 'db0.coll0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb599ca4787b9985d1b92
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.762-0400 m31100| 2015-07-09T13:55:37.762-0400 I COMMAND [conn15] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.764-0400 m31200| 2015-07-09T13:55:37.764-0400 I COMMAND [conn18] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.766-0400 m31102| 2015-07-09T13:55:37.766-0400 I COMMAND [repl writer worker 13] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.766-0400 m31101| 2015-07-09T13:55:37.766-0400 I COMMAND [repl writer worker 11] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.767-0400 m31202| 2015-07-09T13:55:37.767-0400 I COMMAND [repl writer worker 3] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.768-0400 m31201| 2015-07-09T13:55:37.767-0400 I COMMAND [repl writer worker 15] CMD: drop db0.coll0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.818-0400 m31100| 2015-07-09T13:55:37.818-0400 I SHARDING [conn15] remotely refreshing metadata for db0.coll0 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb598ca4787b9985d1b8f, current metadata version is 2|3||559eb598ca4787b9985d1b8f
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.820-0400 m31100| 2015-07-09T13:55:37.819-0400 W SHARDING [conn15] no chunks found when reloading db0.coll0, previous version was 0|0||559eb598ca4787b9985d1b8f, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.820-0400 m31100| 2015-07-09T13:55:37.819-0400 I SHARDING [conn15] dropping metadata for db0.coll0 at shard version 2|3||559eb598ca4787b9985d1b8f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.821-0400 m31200| 2015-07-09T13:55:37.820-0400 I SHARDING [conn18] remotely refreshing metadata for db0.coll0 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb598ca4787b9985d1b8f, current metadata version is 2|5||559eb598ca4787b9985d1b8f
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.823-0400 m31200| 2015-07-09T13:55:37.822-0400 W SHARDING [conn18] no chunks found when reloading db0.coll0, previous version was 0|0||559eb598ca4787b9985d1b8f, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.823-0400 m31200|
2015-07-09T13:55:37.822-0400 I SHARDING [conn18] dropping metadata for db0.coll0 at shard version 2|5||559eb598ca4787b9985d1b8f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.824-0400 m30999| 2015-07-09T13:55:37.823-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:37.823-0400-559eb599ca4787b9985d1b93", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464537823), what: "dropCollection", ns: "db0.coll0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.878-0400 m30999| 2015-07-09T13:55:37.878-0400 I SHARDING [conn1] distributed lock 'db0.coll0/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.935-0400 m30999| 2015-07-09T13:55:37.934-0400 I COMMAND [conn1] DROP DATABASE: db0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.935-0400 m30999| 2015-07-09T13:55:37.935-0400 I SHARDING [conn1] DBConfig::dropDatabase: db0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:37.935-0400 m30999| 2015-07-09T13:55:37.935-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:37.935-0400-559eb599ca4787b9985d1b94", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464537935), what: "dropDatabase.start", ns: "db0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.041-0400 m30999| 2015-07-09T13:55:38.040-0400 I SHARDING [conn1] DBConfig::dropDatabase: db0 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.042-0400 m31100| 2015-07-09T13:55:38.042-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62620 #28 (23 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.043-0400 m31100| 2015-07-09T13:55:38.043-0400 I COMMAND [conn28] dropDatabase db0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.043-0400 m31100| 2015-07-09T13:55:38.043-0400 I COMMAND [conn28] dropDatabase db0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.044-0400 m30999| 2015-07-09T13:55:38.043-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:38.043-0400-559eb59aca4787b9985d1b95", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464538043), what: "dropDatabase", ns: "db0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.044-0400 m31102| 2015-07-09T13:55:38.044-0400 I COMMAND [repl writer worker 14] dropDatabase db0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.044-0400 m31101| 2015-07-09T13:55:38.044-0400 I COMMAND [repl writer worker 13] dropDatabase db0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.044-0400 m31102| 2015-07-09T13:55:38.044-0400 I COMMAND [repl writer worker 14] dropDatabase db0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.044-0400 m31101| 2015-07-09T13:55:38.044-0400 I COMMAND [repl writer worker 13] dropDatabase db0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.133-0400 m31100| 2015-07-09T13:55:38.133-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.134-0400 m31101| 2015-07-09T13:55:38.134-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.137-0400 m31102| 2015-07-09T13:55:38.136-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown 
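
Between workloads the harness tears down the previous database through mongos: the sharded collection is dropped under the 'db0.coll0' distributed lock (bracketed by dropCollection.start/dropCollection changelog events), each shard primary drops its data and shard-version metadata, and finally the database itself is dropped, with every step replicated to the secondaries. A hedged reconstruction of the shell-level equivalent:

    // Sketch of the teardown issued through mongos (m30999).
    var db0 = db.getSiblingDB('db0');
    db0.coll0.drop();    // "DROP: db0.coll0"; each shard runs "CMD: drop db0.coll0"
    db0.dropDatabase();  // "dropDatabase db0 starting" ... "finished";
                         // secondaries replay it via repl writer workers
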
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.167-0400 m31200| 2015-07-09T13:55:38.167-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.169-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.169-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.170-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.170-0400 jstests/concurrency/fsm_workloads/touch_data.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.170-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.170-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.170-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.171-0400 m31202| 2015-07-09T13:55:38.170-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.171-0400 m31201| 2015-07-09T13:55:38.170-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.177-0400 m30999| 2015-07-09T13:55:38.177-0400 I SHARDING [conn1] distributed lock 'db1/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59aca4787b9985d1b96
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.180-0400 m30999| 2015-07-09T13:55:38.180-0400 I SHARDING [conn1] Placing [db1] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.181-0400 m30999| 2015-07-09T13:55:38.180-0400 I SHARDING [conn1] Enabling sharding for database [db1] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.235-0400 m30999| 2015-07-09T13:55:38.234-0400 I SHARDING [conn1] distributed lock 'db1/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.257-0400 m31100| 2015-07-09T13:55:38.256-0400 I INDEX [conn22] build index on: db1.coll1 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db1.coll1" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.257-0400 m31100| 2015-07-09T13:55:38.256-0400 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.259-0400 m31100| 2015-07-09T13:55:38.259-0400 I INDEX [conn22] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.260-0400 m30999| 2015-07-09T13:55:38.259-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db1.coll1", key: { tid: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.263-0400 m30999| 2015-07-09T13:55:38.263-0400 I SHARDING [conn1] distributed lock 'db1.coll1/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59aca4787b9985d1b97 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.265-0400 m30999| 2015-07-09T13:55:38.265-0400 I SHARDING [conn1] enable sharding on: db1.coll1 with shard key: { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.266-0400 m30999| 2015-07-09T13:55:38.265-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:38.265-0400-559eb59aca4787b9985d1b98", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464538265), what: "shardCollection.start", ns: "db1.coll1", details: { shardKey: { tid: 1.0 }, collection: "db1.coll1", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.270-0400 m31101| 2015-07-09T13:55:38.270-0400 I INDEX [repl writer worker 2] build index on: db1.coll1 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.271-0400 m31101| 2015-07-09T13:55:38.270-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.277-0400 m31101| 2015-07-09T13:55:38.277-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.278-0400 m31102| 2015-07-09T13:55:38.278-0400 I INDEX [repl writer worker 5] build index on: db1.coll1 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.279-0400 m31102| 2015-07-09T13:55:38.278-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.283-0400 m31102| 2015-07-09T13:55:38.283-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.319-0400 m30999| 2015-07-09T13:55:38.319-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db1.coll1 using new epoch 559eb59aca4787b9985d1b99 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.374-0400 m30999| 2015-07-09T13:55:38.373-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 7 version: 1|0||559eb59aca4787b9985d1b99 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.429-0400 m30999| 2015-07-09T13:55:38.429-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 8 version: 1|0||559eb59aca4787b9985d1b99 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.431-0400 m31100| 2015-07-09T13:55:38.430-0400 I SHARDING [conn20] remotely refreshing metadata for db1.coll1 with requested shard version 1|0||559eb59aca4787b9985d1b99, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.432-0400 m31100| 2015-07-09T13:55:38.432-0400 I SHARDING [conn20] collection db1.coll1 was previously unsharded, new metadata loaded with shard version 1|0||559eb59aca4787b9985d1b99 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.432-0400 m31100| 2015-07-09T13:55:38.432-0400 I SHARDING [conn20] collection version was loaded at version 1|0||559eb59aca4787b9985d1b99, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.433-0400 m30999| 2015-07-09T13:55:38.432-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:38.432-0400-559eb59aca4787b9985d1b9a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464538432), what: "shardCollection", ns: "db1.coll1", details: { version: "1|0||559eb59aca4787b9985d1b99" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.487-0400 m30999| 2015-07-09T13:55:38.486-0400 I SHARDING [conn1] distributed lock 'db1.coll1/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.494-0400 m31100| 2015-07-09T13:55:38.494-0400 I INDEX [conn20] build index on: db1.coll1 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.494-0400 m31100| 2015-07-09T13:55:38.494-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.504-0400 m31100| 2015-07-09T13:55:38.504-0400 I INDEX [conn20] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.513-0400 m31101| 2015-07-09T13:55:38.513-0400 I INDEX [repl writer worker 3] build index on: db1.coll1 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.514-0400 m31200| 2015-07-09T13:55:38.513-0400 I INDEX [conn19] build index on: db1.coll1 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.514-0400 m31200| 2015-07-09T13:55:38.513-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.515-0400 m31101| 2015-07-09T13:55:38.514-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.515-0400 m31102| 2015-07-09T13:55:38.514-0400 I INDEX [repl writer worker 7] build index on: db1.coll1 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.515-0400 m31102| 2015-07-09T13:55:38.514-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.519-0400 m31200| 2015-07-09T13:55:38.518-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.519-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.522-0400 m31101| 2015-07-09T13:55:38.520-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.522-0400 m31102| 2015-07-09T13:55:38.521-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.618-0400 m31201| 2015-07-09T13:55:38.610-0400 I INDEX [repl writer worker 8] build index on: db1.coll1 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.618-0400 m31201| 2015-07-09T13:55:38.610-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.628-0400 m31202| 2015-07-09T13:55:38.625-0400 I INDEX [repl writer worker 6] build index on: db1.coll1 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db1.coll1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.628-0400 m31202| 2015-07-09T13:55:38.625-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.628-0400 m31201| 2015-07-09T13:55:38.626-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.629-0400 m30999| 2015-07-09T13:55:38.629-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62621 #7 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.637-0400 m30998| 2015-07-09T13:55:38.637-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62624 #7 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.639-0400 m30999| 2015-07-09T13:55:38.639-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62622 #8 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.640-0400 m31202| 2015-07-09T13:55:38.640-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.644-0400 m30998| 2015-07-09T13:55:38.643-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62625 #8 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.659-0400 m30999| 2015-07-09T13:55:38.647-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62623 #9 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.659-0400 m30998| 2015-07-09T13:55:38.653-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62626 #9 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.659-0400 m30999| 2015-07-09T13:55:38.658-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62628 #10 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.659-0400 m30999| 2015-07-09T13:55:38.658-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62629 #11 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.662-0400 m30998| 2015-07-09T13:55:38.661-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62627 #10 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.663-0400 m30998| 2015-07-09T13:55:38.663-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62630 #11 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.683-0400 setting random seed: 2565299915149 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.683-0400 setting random seed: 9282385366968 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.683-0400 setting random seed: 9069455573335 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.683-0400 setting random seed: 4995693997479 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.683-0400 setting random seed: 339892026968 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.684-0400 setting random seed: 9050935287959 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.687-0400 setting random seed: 1935045109130 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.693-0400 setting random seed: 2303164298646 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.694-0400 setting random seed: 717281717807 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.695-0400 m30998| 2015-07-09T13:55:38.693-0400 I SHARDING [conn9] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 3 version: 1|0||559eb59aca4787b9985d1b99 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.697-0400 setting random seed: 5353836794383 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.708-0400 m31100| 2015-07-09T13:55:38.707-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62631 #29 (24 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.713-0400 m31100| 2015-07-09T13:55:38.712-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62632 #30 (25 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.720-0400 m31100| 2015-07-09T13:55:38.720-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62633 #31 (26 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.803-0400 m31100| 2015-07-09T13:55:38.802-0400 I COMMAND [conn26] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, 
ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.803-0400 m31100| 2015-07-09T13:55:38.803-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62634 #32 (27 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.808-0400 m31100| 2015-07-09T13:55:38.807-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62635 #33 (28 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.816-0400 m31100| 2015-07-09T13:55:38.815-0400 I COMMAND [conn16] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 122ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.843-0400 m31100| 2015-07-09T13:55:38.842-0400 I COMMAND [conn23] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.843-0400 m31100| 2015-07-09T13:55:38.843-0400 I SHARDING [conn15] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.844-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.845-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.845-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.845-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.845-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.846-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.846-0400 m31100| 2015-07-09T13:55:38.844-0400 W 
SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.846-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.846-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.846-0400 m31100| 2015-07-09T13:55:38.844-0400 W SHARDING [conn15] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.847-0400 m31100| 2015-07-09T13:55:38.845-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.847-0400 m31100| 2015-07-09T13:55:38.847-0400 I SHARDING [conn15] distributed lock 'db1.coll1/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb59a792e00bb672748d4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.848-0400 m31100| 2015-07-09T13:55:38.847-0400 I SHARDING [conn15] remotely refreshing metadata for db1.coll1 based on current shard version 1|0||559eb59aca4787b9985d1b99, current metadata version is 1|0||559eb59aca4787b9985d1b99 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.850-0400 m31100| 2015-07-09T13:55:38.849-0400 I COMMAND [conn27] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.851-0400 m31100| 2015-07-09T13:55:38.849-0400 I COMMAND [conn22] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.852-0400 m31100| 2015-07-09T13:55:38.850-0400 I COMMAND [conn24] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: 
{ w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.852-0400 m31100| 2015-07-09T13:55:38.850-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62636 #34 (29 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.852-0400 m31100| 2015-07-09T13:55:38.851-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62637 #35 (30 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.853-0400 m31100| 2015-07-09T13:55:38.852-0400 I COMMAND [conn25] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.854-0400 m31100| 2015-07-09T13:55:38.853-0400 I COMMAND [conn29] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.854-0400 m31100| 2015-07-09T13:55:38.853-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62638 #36 (31 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.855-0400 m31100| 2015-07-09T13:55:38.853-0400 I COMMAND [conn30] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.855-0400 m31100| 2015-07-09T13:55:38.854-0400 I COMMAND [conn31] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.855-0400 m31100| 2015-07-09T13:55:38.854-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62639 #37 (32 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.855-0400 m31100| 
2015-07-09T13:55:38.855-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62640 #38 (33 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.856-0400 m31100| 2015-07-09T13:55:38.856-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62641 #39 (34 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.862-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn15] metadata of collection db1.coll1 already up to date (shard version : 1|0||559eb59aca4787b9985d1b99, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.862-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn32] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.864-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn36] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.864-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn15] splitChunk accepted at version 1|0||559eb59aca4787b9985d1b99 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.865-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn39] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.865-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn34] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.865-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.867-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn37] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.868-0400 m31100| 2015-07-09T13:55:38.862-0400 I SHARDING [conn38] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.869-0400 m31100| 2015-07-09T13:55:38.863-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.869-0400 m31100| 2015-07-09T13:55:38.864-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62642 #40 (35 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.869-0400 m31100| 2015-07-09T13:55:38.865-0400 W SHARDING [conn34] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.869-0400 m31100| 2015-07-09T13:55:38.865-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.869-0400 m31100| 2015-07-09T13:55:38.865-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.870-0400 m31100| 2015-07-09T13:55:38.865-0400 W SHARDING [conn34] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.870-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 } 
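
The db1.coll1 entries above cover the next workload's setup and first seconds: db1 is placed on test-rs0, the collection is sharded on { tid: 1.0 } with a single initial chunk, ten worker threads each insert a 100-document batch, and the resulting growth makes mongod request split points for the [MinKey, MaxKey) chunk. Because tid only takes the ten worker ids 0-9, the split-point pass keeps warning about possible low cardinality. A sketch of the equivalent setup and per-thread insert pattern (the document shape is an assumption, not the actual touch_data.js source):

    // Setup, as logged by m30999.
    sh.enableSharding('db1');                  // "Enabling sharding for database [db1]"
    var coll = db.getSiblingDB('db1').coll1;
    coll.ensureIndex({tid: 1});                // shard-key index, built on test-rs0
    sh.shardCollection('db1.coll1', {tid: 1}); // one chunk: { tid: MinKey } to { tid: MaxKey }

    // Per-thread insert pattern: 100 documents tagged with the thread id.
    // With only 10 distinct tid values, split points can fall only between
    // those values, hence the low-cardinality warnings.
    var tid = 0;   // each of the 10 threads uses its own id
    var docs = [];
    for (var i = 0; i < 100; i++) {
        docs.push({tid: tid, x: i});
    }
    coll.insert(docs);
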
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.870-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.870-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.871-0400 m31100| 2015-07-09T13:55:38.866-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.871-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.871-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.871-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.871-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.872-0400 m31100| 2015-07-09T13:55:38.866-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.872-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.872-0400 m31100| 2015-07-09T13:55:38.866-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.873-0400 m31100| 2015-07-09T13:55:38.867-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.873-0400 m31100| 2015-07-09T13:55:38.867-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.873-0400 m31100| 2015-07-09T13:55:38.867-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.873-0400 m31100| 2015-07-09T13:55:38.868-0400 W SHARDING [conn36] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.874-0400 m31100| 2015-07-09T13:55:38.868-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.878-0400 m30998| 2015-07-09T13:55:38.868-0400 W SHARDING [conn7] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.880-0400 m31100| 2015-07-09T13:55:38.869-0400 W SHARDING [conn32] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.884-0400 m30998| 2015-07-09T13:55:38.869-0400 W SHARDING [conn10] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.884-0400 m29000| 2015-07-09T13:55:38.870-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62643 #30 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.884-0400 m31100| 2015-07-09T13:55:38.872-0400 W SHARDING [conn39] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.884-0400 m30998| 2015-07-09T13:55:38.872-0400 W SHARDING [conn11] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.885-0400 m31100| 2015-07-09T13:55:38.874-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:38.874-0400-559eb59a792e00bb672748d5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464538874), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 10, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.885-0400 m31100| 2015-07-09T13:55:38.874-0400 I SHARDING [conn40] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.885-0400 m31100| 2015-07-09T13:55:38.874-0400 I SHARDING [conn39] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.885-0400 m31100| 2015-07-09T13:55:38.876-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.888-0400 m31100| 2015-07-09T13:55:38.876-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.888-0400 m31100| 2015-07-09T13:55:38.876-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.888-0400 m31100| 2015-07-09T13:55:38.876-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.888-0400 m31100| 2015-07-09T13:55:38.876-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.888-0400 m31100| 2015-07-09T13:55:38.876-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.889-0400 m31100| 2015-07-09T13:55:38.876-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.889-0400 m31100| 2015-07-09T13:55:38.877-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.889-0400 m31100| 2015-07-09T13:55:38.877-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.889-0400 m31100| 2015-07-09T13:55:38.877-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.889-0400 m31100| 2015-07-09T13:55:38.877-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.889-0400 m31100| 2015-07-09T13:55:38.877-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.889-0400 m31100| 2015-07-09T13:55:38.877-0400 W SHARDING [conn38] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.890-0400 m30999| 2015-07-09T13:55:38.877-0400 W SHARDING [conn11] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.891-0400 m31100| 2015-07-09T13:55:38.877-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.891-0400 m31100| 2015-07-09T13:55:38.878-0400 W SHARDING [conn40] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.891-0400 m30999| 2015-07-09T13:55:38.878-0400 W SHARDING [conn7] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.892-0400 m31100| 2015-07-09T13:55:38.878-0400 W SHARDING [conn39] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.892-0400 m30998| 2015-07-09T13:55:38.878-0400 W SHARDING [conn9] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.892-0400 m29000| 2015-07-09T13:55:38.880-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62644 #31 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.892-0400 m29000| 2015-07-09T13:55:38.880-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62645 #32 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.892-0400 m29000| 2015-07-09T13:55:38.881-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62646 #33 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.893-0400 m31100| 2015-07-09T13:55:38.882-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.893-0400 m30998| 2015-07-09T13:55:38.882-0400 W SHARDING [conn8] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.893-0400 m30999| 2015-07-09T13:55:38.882-0400 W SHARDING [conn9] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.893-0400 m29000| 2015-07-09T13:55:38.883-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62647 #34 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.894-0400 m31100| 2015-07-09T13:55:38.882-0400 W SHARDING [conn34] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.894-0400 m30999| 2015-07-09T13:55:38.885-0400 W SHARDING [conn8] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.894-0400 m31100| 2015-07-09T13:55:38.884-0400 W SHARDING [conn37] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.894-0400 m31100| 2015-07-09T13:55:38.894-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62648 #41 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.900-0400 m30999| 2015-07-09T13:55:38.899-0400 I SHARDING [conn8] ChunkManager: time to load chunks for db1.coll1: 0ms sequenceNumber: 9 version: 1|10||559eb59aca4787b9985d1b99 based on: 1|0||559eb59aca4787b9985d1b99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.926-0400 m31100| 2015-07-09T13:55:38.925-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:38.925-0400-559eb59a792e00bb672748d6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464538925), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 10, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.970-0400 m31100| 2015-07-09T13:55:38.970-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.974-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.974-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.974-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.974-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.975-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.975-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.975-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.975-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.975-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.975-0400 m31100| 2015-07-09T13:55:38.973-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.976-0400 m31100| 2015-07-09T13:55:38.974-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.976-0400 m31100| 2015-07-09T13:55:38.975-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.977-0400 m30998| 2015-07-09T13:55:38.976-0400 W SHARDING [conn11] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.978-0400 m31100| 2015-07-09T13:55:38.977-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:38.977-0400-559eb59a792e00bb672748d7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464538977), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 10, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.980-0400 m31100| 2015-07-09T13:55:38.979-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62649 #42 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:38.995-0400 m30998| 2015-07-09T13:55:38.995-0400 I NETWORK [conn11] end connection 127.0.0.1:62630 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.030-0400 m31100| 2015-07-09T13:55:39.029-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.029-0400-559eb59b792e00bb672748d8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464539029), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 10, chunk: { min: { tid: 3.0 }, max: { tid: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.034-0400 m31100| 2015-07-09T13:55:39.033-0400 I COMMAND [conn31] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 148ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.034-0400 m31100| 2015-07-09T13:55:39.033-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.038-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.039-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.039-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.039-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.039-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.040-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.040-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.040-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.040-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.041-0400 m31100| 2015-07-09T13:55:39.038-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.041-0400 m31100| 2015-07-09T13:55:39.039-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.042-0400 m31100| 2015-07-09T13:55:39.040-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.042-0400 m30998| 2015-07-09T13:55:39.041-0400 W SHARDING [conn10] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.044-0400 m31100| 2015-07-09T13:55:39.043-0400 I COMMAND [conn25] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 153ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.044-0400 m31100| 2015-07-09T13:55:39.043-0400 I COMMAND [conn24] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 149ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.044-0400 m31100| 2015-07-09T13:55:39.043-0400 I SHARDING [conn39] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.045-0400 m31100| 2015-07-09T13:55:39.044-0400 I COMMAND [conn30] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 153ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.050-0400 m31100| 2015-07-09T13:55:39.049-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.050-0400 m31100| 2015-07-09T13:55:39.049-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.050-0400 m31100| 2015-07-09T13:55:39.049-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.050-0400 m31100| 2015-07-09T13:55:39.049-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.050-0400 m31100| 2015-07-09T13:55:39.049-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.050-0400 m31100| 2015-07-09T13:55:39.050-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.051-0400 m31100| 2015-07-09T13:55:39.050-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.051-0400 m31100| 2015-07-09T13:55:39.050-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.051-0400 m31100| 2015-07-09T13:55:39.050-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.051-0400 m31100| 2015-07-09T13:55:39.050-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.054-0400 m31100| 2015-07-09T13:55:39.054-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.055-0400 m31100| 2015-07-09T13:55:39.054-0400 I COMMAND [conn16] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.055-0400 m31100| 2015-07-09T13:55:39.055-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62650 #43 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.056-0400 m31100| 2015-07-09T13:55:39.055-0400 W SHARDING [conn39] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.057-0400 m30998| 2015-07-09T13:55:39.056-0400 W SHARDING [conn8] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.058-0400 m31100| 2015-07-09T13:55:39.056-0400 I COMMAND [conn27] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 157ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.058-0400 m31100| 2015-07-09T13:55:39.056-0400 I SHARDING [conn39] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.059-0400 m31100| 2015-07-09T13:55:39.057-0400 I COMMAND [conn22] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 146ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.063-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.064-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.064-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.064-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.064-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.064-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.065-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.065-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.065-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.065-0400 m31100| 2015-07-09T13:55:39.058-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.065-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.065-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.066-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.066-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.066-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.066-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.066-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.066-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.066-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.067-0400 m31100| 2015-07-09T13:55:39.060-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.067-0400 m31100| 2015-07-09T13:55:39.060-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.067-0400 m31100| 2015-07-09T13:55:39.062-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.068-0400 m31100| 2015-07-09T13:55:39.063-0400 I COMMAND [conn29] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 161ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.068-0400 m31100| 2015-07-09T13:55:39.064-0400 W SHARDING [conn39] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.068-0400 m30998| 2015-07-09T13:55:39.064-0400 W SHARDING [conn7] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.068-0400 m31100| 2015-07-09T13:55:39.064-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.068-0400 m31100| 2015-07-09T13:55:39.064-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62651 #44 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.069-0400 m30998| 2015-07-09T13:55:39.064-0400 W SHARDING [conn9] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.081-0400 m31100| 2015-07-09T13:55:39.081-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.081-0400-559eb59b792e00bb672748d9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464539081), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 10, chunk: { min: { tid: 4.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.134-0400 m31100| 2015-07-09T13:55:39.133-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.133-0400-559eb59b792e00bb672748da", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464539133), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 10, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.165-0400 m31100| 2015-07-09T13:55:39.164-0400 I COMMAND [conn24] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.165-0400 m31100| 2015-07-09T13:55:39.165-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.173-0400 m31100| 2015-07-09T13:55:39.170-0400 I COMMAND [conn22] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.173-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.173-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.174-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.174-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.174-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.174-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.175-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.175-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.175-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.175-0400 m31100| 2015-07-09T13:55:39.170-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.175-0400 m31100| 2015-07-09T13:55:39.172-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.175-0400 m31100| 2015-07-09T13:55:39.174-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.176-0400 m31100| 2015-07-09T13:55:39.174-0400 I COMMAND [conn27] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.176-0400 m30998| 2015-07-09T13:55:39.174-0400 W SHARDING [conn10] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.176-0400 m31100| 2015-07-09T13:55:39.174-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.182-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.182-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.182-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.182-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.182-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.183-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.183-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.183-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.183-0400 m31100| 2015-07-09T13:55:39.180-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.183-0400 m31100| 2015-07-09T13:55:39.181-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.184-0400 m31100| 2015-07-09T13:55:39.182-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.185-0400 m30998| 2015-07-09T13:55:39.182-0400 W SHARDING [conn8] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.185-0400 m31100| 2015-07-09T13:55:39.185-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.185-0400-559eb59b792e00bb672748db", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464539185), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 10, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.190-0400 m31100| 2015-07-09T13:55:39.190-0400 I COMMAND [conn25] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.196-0400 m31100| 2015-07-09T13:55:39.191-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.196-0400 m31100| 2015-07-09T13:55:39.192-0400 I COMMAND [conn29] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 118ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.197-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.197-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.197-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.197-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.198-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.198-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.200-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.201-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.201-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.201-0400 m31100| 2015-07-09T13:55:39.196-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.201-0400 m31100| 2015-07-09T13:55:39.196-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.202-0400 m31100| 2015-07-09T13:55:39.197-0400 I COMMAND [conn16] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 120ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.202-0400 m31100| 2015-07-09T13:55:39.198-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.204-0400 m30998| 2015-07-09T13:55:39.198-0400 W SHARDING [conn9] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.204-0400 m30999| 2015-07-09T13:55:39.202-0400 I NETWORK [conn8] end connection 127.0.0.1:62622 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.208-0400 m31100| 2015-07-09T13:55:39.207-0400 I COMMAND [conn30] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 124ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.213-0400 m30999| 2015-07-09T13:55:39.212-0400 I NETWORK [conn7] end connection 127.0.0.1:62621 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.220-0400 m31100| 2015-07-09T13:55:39.219-0400 I COMMAND [conn31] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 134ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.220-0400 m31100| 2015-07-09T13:55:39.220-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.225-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.226-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.226-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.226-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.226-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.226-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.227-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.227-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.227-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.227-0400 m31100| 2015-07-09T13:55:39.225-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.228-0400 m31100| 2015-07-09T13:55:39.226-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.228-0400 m31100| 2015-07-09T13:55:39.227-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.229-0400 m30998| 2015-07-09T13:55:39.227-0400 W SHARDING [conn7] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.237-0400 m31100| 2015-07-09T13:55:39.237-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.237-0400-559eb59b792e00bb672748dc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464539237), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 10, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.288-0400 m31100| 2015-07-09T13:55:39.287-0400 I COMMAND [conn27] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.289-0400 m31100| 2015-07-09T13:55:39.288-0400 I COMMAND [conn22] command db1.$cmd command: insert { insert: "coll1", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb59aca4787b9985d1b99') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 113ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.290-0400 m31100| 2015-07-09T13:55:39.288-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.290-0400 m31100| 2015-07-09T13:55:39.288-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.288-0400-559eb59b792e00bb672748dd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464539288), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 10, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.299-0400 m31100| 2015-07-09T13:55:39.295-0400 I SHARDING [conn39] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
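
Each "request split points lookup" line is the shard scanning a chunk to propose the splitKeys that mongos then sends back in a splitChunk request, as logged above. The same scan can be invoked by hand against the shard (here m31100) with the splitVector command; a minimal sketch, with the maxChunkSize value purely illustrative:

    // Sketch: ask the shard directly for candidate split points in
    // db1.coll1, targeting roughly 1 MB chunks. Run on the shard
    // primary, not through mongos.
    db.getSiblingDB("db1").runCommand({
        splitVector: "db1.coll1",
        keyPattern: { tid: 1 },
        min: { tid: MinKey },
        max: { tid: MaxKey },
        maxChunkSize: 1
    });
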
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.299-0400 m31100| 2015-07-09T13:55:39.295-0400 I SHARDING [conn32] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.300-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.300-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.300-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.300-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.300-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.302-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.303-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.303-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.303-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.304-0400 m31100| 2015-07-09T13:55:39.296-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.305-0400 m31100| 2015-07-09T13:55:39.300-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.305-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.305-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.305-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.305-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.306-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.306-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.306-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.306-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.306-0400 m31100| 2015-07-09T13:55:39.301-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.307-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.307-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.307-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.309-0400 m31100| 2015-07-09T13:55:39.302-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.309-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.309-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.309-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.311-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.312-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.312-0400 m31100| 2015-07-09T13:55:39.302-0400 W SHARDING [conn39] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.312-0400 m31100| 2015-07-09T13:55:39.303-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.313-0400 m30998| 2015-07-09T13:55:39.303-0400 W SHARDING [conn9] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.313-0400 m31100| 2015-07-09T13:55:39.303-0400 W SHARDING [conn32] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.313-0400 m31100| 2015-07-09T13:55:39.304-0400 I SHARDING [conn32] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.313-0400 m30999| 2015-07-09T13:55:39.305-0400 I NETWORK [conn11] end connection 127.0.0.1:62629 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.324-0400 m31100| 2015-07-09T13:55:39.306-0400 W SHARDING [conn39] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.326-0400 m30998| 2015-07-09T13:55:39.307-0400 W SHARDING [conn8] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.326-0400 m30998| 2015-07-09T13:55:39.308-0400 I NETWORK [conn9] end connection 127.0.0.1:62626 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.326-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.327-0400 m30998| 2015-07-09T13:55:39.312-0400 W SHARDING [conn7] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.327-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.327-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.327-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.327-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.328-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.328-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.328-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.328-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.328-0400 m31100| 2015-07-09T13:55:39.310-0400 W SHARDING [conn32] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.329-0400 m31100| 2015-07-09T13:55:39.311-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.329-0400 m31100| 2015-07-09T13:55:39.312-0400 W SHARDING [conn32] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.329-0400 m30999| 2015-07-09T13:55:39.317-0400 I NETWORK [conn9] end connection 127.0.0.1:62623 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.329-0400 m30998| 2015-07-09T13:55:39.317-0400 I NETWORK [conn7] end connection 127.0.0.1:62624 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.329-0400 m31100| 2015-07-09T13:55:39.320-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.330-0400 m30998| 2015-07-09T13:55:39.320-0400 W SHARDING [conn10] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.330-0400 m30998| 2015-07-09T13:55:39.330-0400 I NETWORK [conn10] end connection 127.0.0.1:62627 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.352-0400 m31100| 2015-07-09T13:55:39.341-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.341-0400-559eb59b792e00bb672748de", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464539341), what: "multi-split", ns: "db1.coll1", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 10, of: 10, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eb59aca4787b9985d1b99') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.352-0400 m31100| 2015-07-09T13:55:39.348-0400 I SHARDING [conn35] request split points lookup for chunk db1.coll1 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.354-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.355-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.355-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.355-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.355-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.355-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.355-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.355-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.356-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.356-0400 m31100| 2015-07-09T13:55:39.354-0400 W SHARDING [conn35] possible low cardinality key detected in db1.coll1 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.356-0400 m31100| 2015-07-09T13:55:39.355-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.357-0400 m31100| 2015-07-09T13:55:39.356-0400 W SHARDING [conn35] could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db1.coll1 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.357-0400 m30998| 2015-07-09T13:55:39.356-0400 W SHARDING [conn8] splitChunk failed - cmd: { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db1.coll1 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { :...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.362-0400 m30998| 2015-07-09T13:55:39.361-0400 I NETWORK [conn8] end connection 127.0.0.1:62625 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.396-0400 m31100| 2015-07-09T13:55:39.395-0400 I SHARDING [conn15] distributed lock 'db1.coll1/bs-osx108-8:31100:1436464536:197041335' unlocked.
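
The storm of code-125 "splitChunk failed" errors above is contention, not data loss: both mongos routers (m30998, m30999) keep asking the primary shard to autosplit the same { : MinKey } -->> { : MaxKey } chunk, the shard serializes the attempts through the distributed lock 'db1.coll1/...', and every attempt but the winner's fails with "could not acquire collection lock" while the winning request (conn15) works through the ten-way multi-split logged step by step. The losing routers simply retry on later inserts. An equivalent split can also be requested manually through mongos; a minimal sketch using one of the same split points:

    // Sketch: request a manual split of db1.coll1 at tid 5 via mongos;
    // mongos resolves the owning chunk and forwards a splitChunk
    // command to the shard, as seen in the log above.
    db.adminCommand({ split: "db1.coll1", middle: { tid: 5 } });
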
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.396-0400 m31100| 2015-07-09T13:55:39.395-0400 I COMMAND [conn15] command db1.coll1 command: splitChunk { splitChunk: "db1.coll1", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59aca4787b9985d1b99') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 21940 } } } protocol:op_command 550ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.397-0400 m30999| 2015-07-09T13:55:39.397-0400 I SHARDING [conn10] autosplitted db1.coll1 shard: ns: db1.coll1, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 10 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.449-0400 m30999| 2015-07-09T13:55:39.449-0400 I NETWORK [conn10] end connection 127.0.0.1:62628 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.467-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.467-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.467-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.467-0400 jstests/concurrency/fsm_workloads/touch_data.js: Workload completed in 948 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.467-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.468-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.468-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.468-0400 m30999| 2015-07-09T13:55:39.467-0400 I COMMAND [conn1] DROP: db1.coll1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.468-0400 m30999| 2015-07-09T13:55:39.468-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.468-0400-559eb59bca4787b9985d1b9b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464539468), what: "dropCollection.start", ns: "db1.coll1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.524-0400 m30999| 2015-07-09T13:55:39.524-0400 I SHARDING [conn1] distributed lock 'db1.coll1/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59bca4787b9985d1b9c
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.525-0400 m31100| 2015-07-09T13:55:39.525-0400 I COMMAND [conn15] CMD: drop db1.coll1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.527-0400 m31200| 2015-07-09T13:55:39.527-0400 I COMMAND [conn18] CMD: drop db1.coll1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.529-0400 m31101| 2015-07-09T13:55:39.529-0400 I COMMAND [repl writer worker 14] CMD: drop db1.coll1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.530-0400 m31102| 2015-07-09T13:55:39.529-0400 I COMMAND [repl writer worker 1] CMD: drop db1.coll1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.531-0400 m31202| 2015-07-09T13:55:39.530-0400 I COMMAND [repl writer worker 7] CMD: drop db1.coll1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.531-0400 m31201| 2015-07-09T13:55:39.530-0400 I COMMAND [repl writer worker 4] CMD: drop db1.coll1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.583-0400 m31100| 2015-07-09T13:55:39.583-0400 I SHARDING [conn15] remotely refreshing metadata for db1.coll1 with requested shard version 0|0||000000000000000000000000, current shard version is 1|10||559eb59aca4787b9985d1b99, current metadata version is 1|10||559eb59aca4787b9985d1b99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.585-0400 m31100| 2015-07-09T13:55:39.584-0400 W SHARDING [conn15] no chunks found when reloading db1.coll1, previous version was 0|0||559eb59aca4787b9985d1b99, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.585-0400 m31100| 2015-07-09T13:55:39.585-0400 I SHARDING [conn15] dropping metadata for db1.coll1 at shard version 1|10||559eb59aca4787b9985d1b99, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.588-0400 m30999| 2015-07-09T13:55:39.587-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.587-0400-559eb59bca4787b9985d1b9d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464539587), what: "dropCollection", ns: "db1.coll1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.641-0400 m30999| 2015-07-09T13:55:39.641-0400 I SHARDING [conn1] distributed lock 'db1.coll1/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.698-0400 m30999| 2015-07-09T13:55:39.697-0400 I COMMAND [conn1] DROP DATABASE: db1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.698-0400 m30999| 2015-07-09T13:55:39.697-0400 I SHARDING [conn1] DBConfig::dropDatabase: db1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.698-0400 m30999| 2015-07-09T13:55:39.697-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.697-0400-559eb59bca4787b9985d1b9e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464539697), what: "dropDatabase.start", ns: "db1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.803-0400 m30999| 2015-07-09T13:55:39.803-0400 I SHARDING [conn1] DBConfig::dropDatabase: db1 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.804-0400 m31100| 2015-07-09T13:55:39.803-0400 I COMMAND [conn28] dropDatabase db1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.804-0400 m31100| 2015-07-09T13:55:39.803-0400 I COMMAND [conn28] dropDatabase db1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.804-0400 m30999| 2015-07-09T13:55:39.804-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:39.804-0400-559eb59bca4787b9985d1b9f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464539804), what: "dropDatabase", ns: "db1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.805-0400 m31101| 2015-07-09T13:55:39.804-0400 I COMMAND [repl writer worker 12] dropDatabase db1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.805-0400 m31101| 2015-07-09T13:55:39.805-0400 I COMMAND [repl writer worker 12] dropDatabase db1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.805-0400 m31102| 2015-07-09T13:55:39.804-0400 I COMMAND [repl writer worker 15] dropDatabase db1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.805-0400 m31102| 2015-07-09T13:55:39.805-0400 I COMMAND [repl writer worker 15] dropDatabase db1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.888-0400 m31100| 2015-07-09T13:55:39.888-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.892-0400 m31101| 2015-07-09T13:55:39.892-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.892-0400 m31102| 2015-07-09T13:55:39.892-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.927-0400 m31200| 2015-07-09T13:55:39.927-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.930-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.930-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.930-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.930-0400 jstests/concurrency/fsm_workloads/drop_collection.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.930-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.930-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.931-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.931-0400 m31201| 2015-07-09T13:55:39.930-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.931-0400 m31202| 2015-07-09T13:55:39.930-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.937-0400 m30999| 2015-07-09T13:55:39.936-0400 I SHARDING [conn1] distributed lock 'db2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59bca4787b9985d1ba0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.940-0400 m30999| 2015-07-09T13:55:39.940-0400 I SHARDING [conn1] Placing [db2] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.940-0400 m30999| 2015-07-09T13:55:39.940-0400 I SHARDING [conn1] Enabling sharding for database [db2] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:39.994-0400 m30999| 2015-07-09T13:55:39.993-0400 I SHARDING [conn1] distributed lock 'db2/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.014-0400 m31100| 2015-07-09T13:55:40.013-0400 I INDEX [conn29] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.014-0400 m31100| 2015-07-09T13:55:40.013-0400 I INDEX [conn29] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.022-0400 m31100| 2015-07-09T13:55:40.021-0400 I INDEX [conn29] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.025-0400 m30999| 2015-07-09T13:55:40.023-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db2.coll2", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.027-0400 m30999| 2015-07-09T13:55:40.027-0400 I SHARDING [conn1] distributed lock 'db2.coll2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59cca4787b9985d1ba1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.029-0400 m30999| 2015-07-09T13:55:40.028-0400 I SHARDING [conn1] enable sharding on: db2.coll2 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.029-0400 m30999| 2015-07-09T13:55:40.028-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.028-0400-559eb59cca4787b9985d1ba2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464540028), what: "shardCollection.start", ns: "db2.coll2", details: { shardKey: { _id: "hashed" }, collection: "db2.coll2", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.038-0400 m31102| 2015-07-09T13:55:40.037-0400 I INDEX [repl writer worker 8] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.038-0400 m31102| 2015-07-09T13:55:40.037-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.044-0400 m31101| 2015-07-09T13:55:40.043-0400 I INDEX [repl writer worker 0] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.044-0400 m31101| 2015-07-09T13:55:40.043-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.045-0400 m31102| 2015-07-09T13:55:40.044-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.050-0400 m31101| 2015-07-09T13:55:40.049-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.083-0400 m30999| 2015-07-09T13:55:40.082-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db2.coll2 using new epoch 559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.189-0400 m30999| 2015-07-09T13:55:40.188-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 1ms sequenceNumber: 10 version: 1|1||559eb59cca4787b9985d1ba3 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.244-0400 m30999| 2015-07-09T13:55:40.244-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 11 version: 1|1||559eb59cca4787b9985d1ba3 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.246-0400 m31100| 2015-07-09T13:55:40.246-0400 I SHARDING [conn41] remotely refreshing metadata for db2.coll2 with requested shard version 1|1||559eb59cca4787b9985d1ba3, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.248-0400 m31100| 2015-07-09T13:55:40.247-0400 I SHARDING [conn41] collection db2.coll2 was previously unsharded, new metadata loaded with shard version 1|1||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.248-0400 m31100| 2015-07-09T13:55:40.248-0400 I SHARDING [conn41] collection version was loaded at version 1|1||559eb59cca4787b9985d1ba3, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.249-0400 m30999| 2015-07-09T13:55:40.248-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.248-0400-559eb59cca4787b9985d1ba4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464540248), what: "shardCollection", ns: "db2.coll2", details: { version: "1|1||559eb59cca4787b9985d1ba3" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.303-0400 m30999| 2015-07-09T13:55:40.303-0400 I SHARDING [conn1] distributed lock 'db2.coll2/bs-osx108-8:30999:1436464534:16807' unlocked.
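
db2.coll2 is set up the standard way for a hashed shard key: mongos builds the { _id: "hashed" } index on the primary shard test-rs0, registers the collection under epoch 559eb59cca4787b9985d1ba3, and pre-creates the two initial chunks recorded in the shardCollection.start event (numChunks: 2). A minimal sketch of the same setup issued from a mongos shell:

    // Sketch: the equivalent of the enableSharding/shardcollection
    // commands the test harness runs above.
    sh.enableSharding("db2");
    sh.shardCollection("db2.coll2", { _id: "hashed" });
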
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.304-0400 m30999| 2015-07-09T13:55:40.303-0400 I SHARDING [conn1] moving chunk ns: db2.coll2 moving ( ns: db2.coll2, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.304-0400 m31100| 2015-07-09T13:55:40.304-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.305-0400 m31100| 2015-07-09T13:55:40.305-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb59cca4787b9985d1ba3') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.309-0400 m31100| 2015-07-09T13:55:40.309-0400 I SHARDING [conn15] distributed lock 'db2.coll2/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb59c792e00bb672748e0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.310-0400 m31100| 2015-07-09T13:55:40.309-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.309-0400-559eb59c792e00bb672748e1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464540309), what: "moveChunk.start", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.362-0400 m31100| 2015-07-09T13:55:40.361-0400 I SHARDING [conn15] remotely refreshing metadata for db2.coll2 based on current shard version 1|1||559eb59cca4787b9985d1ba3, current metadata version is 1|1||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.363-0400 m31100| 2015-07-09T13:55:40.363-0400 I SHARDING [conn15] metadata of collection db2.coll2 already up to date (shard version : 1|1||559eb59cca4787b9985d1ba3, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.363-0400 m31100| 2015-07-09T13:55:40.363-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.364-0400 m31100| 2015-07-09T13:55:40.363-0400 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.364-0400 m31200| 2015-07-09T13:55:40.364-0400 I SHARDING [conn16] remotely refreshing metadata for db2.coll2, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.366-0400 m31200| 2015-07-09T13:55:40.365-0400 I SHARDING [conn16] collection db2.coll2 was previously unsharded, new metadata loaded with shard version 0|0||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.366-0400 m31200| 2015-07-09T13:55:40.365-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb59cca4787b9985d1ba3, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.366-0400 m31200| 2015-07-09T13:55:40.366-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db2.coll2 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.368-0400 m31100| 2015-07-09T13:55:40.368-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.372-0400 m31100| 2015-07-09T13:55:40.371-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.377-0400 m31100| 2015-07-09T13:55:40.376-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.383-0400 m31200| 2015-07-09T13:55:40.383-0400 I INDEX [migrateThread] build index on: db2.coll2 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.383-0400 m31200| 2015-07-09T13:55:40.383-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.386-0400 m31100| 2015-07-09T13:55:40.386-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.390-0400 m31200| 2015-07-09T13:55:40.390-0400 I INDEX [migrateThread] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.391-0400 m31200| 2015-07-09T13:55:40.390-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.400-0400 m31200| 2015-07-09T13:55:40.400-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.401-0400 m31200| 2015-07-09T13:55:40.401-0400 I SHARDING [migrateThread] Deleter starting delete for: db2.coll2 from { _id: 0 } -> { _id: MaxKey }, with opId: 523
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.401-0400 m31200| 2015-07-09T13:55:40.401-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db2.coll2 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.403-0400 m31100| 2015-07-09T13:55:40.403-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.408-0400 m31201| 2015-07-09T13:55:40.408-0400 I INDEX [repl writer worker 6] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.408-0400 m31201| 2015-07-09T13:55:40.408-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.409-0400 m31202| 2015-07-09T13:55:40.408-0400 I INDEX [repl writer worker 10] build index on: db2.coll2 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db2.coll2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.409-0400 m31202| 2015-07-09T13:55:40.408-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.413-0400 m31202| 2015-07-09T13:55:40.413-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.415-0400 m31200| 2015-07-09T13:55:40.414-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.415-0400 m31200| 2015-07-09T13:55:40.414-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db2.coll2' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.416-0400 m31201| 2015-07-09T13:55:40.415-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.437-0400 m31100| 2015-07-09T13:55:40.437-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.438-0400 m31100| 2015-07-09T13:55:40.437-0400 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.438-0400 m31100| 2015-07-09T13:55:40.438-0400 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.438-0400 m31100| 2015-07-09T13:55:40.438-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.450-0400 m31200| 2015-07-09T13:55:40.449-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db2.coll2' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.450-0400 m31200| 2015-07-09T13:55:40.449-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.449-0400-559eb59cd5a107a5b9c0da86", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464540449), what: "moveChunk.to", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 34, step 2 of 5: 12, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 35, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.503-0400 m31100| 2015-07-09T13:55:40.503-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.504-0400 m31100| 2015-07-09T13:55:40.503-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb59cca4787b9985d1ba3 through { _id: MinKey } -> { _id: 0 } for collection 'db2.coll2'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.505-0400 m31100| 2015-07-09T13:55:40.505-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.505-0400-559eb59c792e00bb672748e2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464540505), what: "moveChunk.commit", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.559-0400 m31100| 2015-07-09T13:55:40.558-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.559-0400 m31100| 2015-07-09T13:55:40.558-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.559-0400 m31100| 2015-07-09T13:55:40.558-0400 I SHARDING [conn15] Deleter starting delete for: db2.coll2 from { _id: 0 } -> { _id: MaxKey }, with opId: 1319
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.560-0400 m31100| 2015-07-09T13:55:40.559-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db2.coll2 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.560-0400 m31100| 2015-07-09T13:55:40.559-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.560-0400 m31100| 2015-07-09T13:55:40.559-0400 I SHARDING [conn15] distributed lock 'db2.coll2/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.560-0400 m31100| 2015-07-09T13:55:40.560-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.560-0400-559eb59c792e00bb672748e3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464540560), what: "moveChunk.from", ns: "db2.coll2", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 121, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.613-0400 m31100| 2015-07-09T13:55:40.612-0400 I COMMAND [conn15] command db2.coll2 command: moveChunk { moveChunk: "db2.coll2", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb59cca4787b9985d1ba3') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 308ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.615-0400 m30999| 2015-07-09T13:55:40.615-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 12 version: 2|1||559eb59cca4787b9985d1ba3 based on: 1|1||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.616-0400 m31100| 2015-07-09T13:55:40.616-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db2.coll2", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59cca4787b9985d1ba3') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.620-0400 m31100| 2015-07-09T13:55:40.619-0400 I SHARDING [conn15] distributed lock 'db2.coll2/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb59c792e00bb672748e4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.620-0400 m31100| 2015-07-09T13:55:40.620-0400 I SHARDING [conn15] remotely refreshing metadata for db2.coll2 based on current shard version 2|0||559eb59cca4787b9985d1ba3, current metadata version is 2|0||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.621-0400 m31100| 2015-07-09T13:55:40.621-0400 I SHARDING [conn15] updating metadata for db2.coll2 from shard version 2|0||559eb59cca4787b9985d1ba3 to shard version 2|1||559eb59cca4787b9985d1ba3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.621-0400 m31100| 2015-07-09T13:55:40.621-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb59cca4787b9985d1ba3, took 1ms
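
The migration above is what spreads the two initial hashed chunks across the shards: donor (test-rs0) and recipient (test-rs1) step through the states visible in the progress lines (ready -> steady -> commit -> done), zero documents are cloned because the collection is still empty, both sides flush to secondaries before the critical section, and waitForDelete: true makes the donor delete the migrated range inline before the command returns. The same migration can be requested through mongos; a minimal sketch, with the bounds taken from the log:

    // Sketch: manually move the chunk covering hashed values >= 0 to
    // test-rs1, mirroring the migration above. For a hashed shard key
    // the chunk is addressed by its exact bounds rather than by a
    // matching document.
    db.adminCommand({
        moveChunk: "db2.coll2",
        bounds: [ { _id: 0 }, { _id: MaxKey } ],
        to: "test-rs1"
    });
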
2|1||559eb59cca4787b9985d1ba3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.622-0400 m31100| 2015-07-09T13:55:40.621-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.623-0400 m31100| 2015-07-09T13:55:40.623-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.623-0400-559eb59c792e00bb672748e5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464540623), what: "split", ns: "db2.coll2", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb59cca4787b9985d1ba3') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb59cca4787b9985d1ba3') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.677-0400 m31100| 2015-07-09T13:55:40.677-0400 I SHARDING [conn15] distributed lock 'db2.coll2/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.679-0400 m30999| 2015-07-09T13:55:40.678-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 13 version: 2|3||559eb59cca4787b9985d1ba3 based on: 2|1||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.680-0400 m31200| 2015-07-09T13:55:40.679-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db2.coll2", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59cca4787b9985d1ba3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.683-0400 m31200| 2015-07-09T13:55:40.682-0400 I SHARDING [conn18] distributed lock 'db2.coll2/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb59cd5a107a5b9c0da87 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.683-0400 m31200| 2015-07-09T13:55:40.683-0400 I SHARDING [conn18] remotely refreshing metadata for db2.coll2 based on current shard version 0|0||559eb59cca4787b9985d1ba3, current metadata version is 1|1||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.684-0400 m31200| 2015-07-09T13:55:40.684-0400 I SHARDING [conn18] updating metadata for db2.coll2 from shard version 0|0||559eb59cca4787b9985d1ba3 to shard version 2|0||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.685-0400 m31200| 2015-07-09T13:55:40.684-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb59cca4787b9985d1ba3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.685-0400 m31200| 2015-07-09T13:55:40.684-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.686-0400 m31200| 2015-07-09T13:55:40.685-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:40.685-0400-559eb59cd5a107a5b9c0da88", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464540685), what: "split", ns: "db2.coll2", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb59cca4787b9985d1ba3') }, right: { min: { _id: 
4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb59cca4787b9985d1ba3') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.740-0400 m31200| 2015-07-09T13:55:40.740-0400 I SHARDING [conn18] distributed lock 'db2.coll2/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.742-0400 m30999| 2015-07-09T13:55:40.741-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 14 version: 2|5||559eb59cca4787b9985d1ba3 based on: 2|3||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.743-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.837-0400 m30999| 2015-07-09T13:55:40.837-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62652 #12 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.854-0400 m30998| 2015-07-09T13:55:40.853-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62653 #12 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.855-0400 m30998| 2015-07-09T13:55:40.854-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62654 #13 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.866-0400 m30998| 2015-07-09T13:55:40.865-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62655 #14 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.866-0400 m30999| 2015-07-09T13:55:40.865-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62656 #13 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.866-0400 m30999| 2015-07-09T13:55:40.865-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62657 #14 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.877-0400 m30998| 2015-07-09T13:55:40.877-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62658 #15 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.879-0400 m30998| 2015-07-09T13:55:40.879-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62659 #16 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.881-0400 m30999| 2015-07-09T13:55:40.880-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62660 #15 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.883-0400 m30999| 2015-07-09T13:55:40.883-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62661 #16 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.888-0400 setting random seed: 2417845288291 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.888-0400 setting random seed: 6980962464585 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.888-0400 setting random seed: 5375964590348 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.889-0400 setting random seed: 9036196926608 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.889-0400 setting random seed: 6725779236294 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.889-0400 setting random seed: 6523083909414 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.890-0400 setting random seed: 171677037142 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.892-0400 setting random seed: 3471316583454 [js_test:fsm_all_sharded_replication] 
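[editor's note] The moveChunk/splitChunk sequence above leaves db2.coll2 with four chunks, two per shard, at collection version 2|5||559eb59cca4787b9985d1ba3. A minimal mongo-shell sketch for inspecting that layout, assuming a shell attached to one of the routers in this cluster (e.g. ./mongo --port 30999):

    // List the chunks of db2.coll2 in shard-key order. With the hashed key above,
    // this should print ranges bounded by -4611686018427387902, 0 and
    // 4611686018427387902, with lastmod versions 2|2 through 2|5.
    var config = db.getSiblingDB("config");
    config.chunks.find({ ns: "db2.coll2" }).sort({ min: 1 }).forEach(printjson);
    sh.status(); // the same information, summarized per shard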
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.892-0400 setting random seed: 3836912056431
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.892-0400 m31100| 2015-07-09T13:55:40.891-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62662 #45 (40 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.892-0400 setting random seed: 2629054742865
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.892-0400 m30998| 2015-07-09T13:55:40.891-0400 I SHARDING [conn14] ChunkManager: time to load chunks for db2.coll2: 0ms sequenceNumber: 4 version: 2|5||559eb59cca4787b9985d1ba3 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.893-0400 m31100| 2015-07-09T13:55:40.892-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62663 #46 (41 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.893-0400 m31100| 2015-07-09T13:55:40.893-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62664 #47 (42 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.894-0400 m31100| 2015-07-09T13:55:40.894-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62665 #48 (43 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.895-0400 m31100| 2015-07-09T13:55:40.895-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62666 #49 (44 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.896-0400 m31100| 2015-07-09T13:55:40.895-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62667 #50 (45 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.897-0400 m31100| 2015-07-09T13:55:40.896-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62668 #51 (46 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.904-0400 m30999| 2015-07-09T13:55:40.904-0400 I COMMAND [conn14] DROP: db2.drop_collection6_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.905-0400 m30999| 2015-07-09T13:55:40.904-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.905-0400 m31100| 2015-07-09T13:55:40.904-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.915-0400 m30999| 2015-07-09T13:55:40.915-0400 I COMMAND [conn12] DROP: db2.drop_collection8_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.915-0400 m30999| 2015-07-09T13:55:40.915-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.916-0400 m31100| 2015-07-09T13:55:40.915-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.938-0400 m30998| 2015-07-09T13:55:40.937-0400 I COMMAND [conn14] DROP: db2.drop_collection9_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.938-0400 m30998| 2015-07-09T13:55:40.937-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.938-0400 m31100| 2015-07-09T13:55:40.937-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.950-0400 m30999| 2015-07-09T13:55:40.950-0400 I COMMAND [conn13] DROP: db2.drop_collection4_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.950-0400 m30999| 2015-07-09T13:55:40.950-0400 I COMMAND [conn13] drop going to do passthrough
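[editor's note] The NETWORK lines track per-process connection counters: each of the ten workload threads connects through one of the two routers (m30998 and m30999 each reach 6 client connections above), and the routers in turn pool connections to the primary m31100, which climbs to #51 (46 open). A quick, hedged way to watch the same counter from a shell on any of these nodes:

    db.serverStatus().connections // e.g. { "current" : 46, "available" : ..., "totalCreated" : ... }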
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.951-0400 m31100| 2015-07-09T13:55:40.950-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.963-0400 m30998| 2015-07-09T13:55:40.962-0400 I COMMAND [conn12] DROP: db2.drop_collection3_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.963-0400 m30998| 2015-07-09T13:55:40.962-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.964-0400 m31100| 2015-07-09T13:55:40.963-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.977-0400 m30999| 2015-07-09T13:55:40.977-0400 I COMMAND [conn16] DROP: db2.drop_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.977-0400 m31100| 2015-07-09T13:55:40.977-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.977-0400 m30999| 2015-07-09T13:55:40.977-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.995-0400 m30998| 2015-07-09T13:55:40.994-0400 I COMMAND [conn16] DROP: db2.drop_collection7_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.995-0400 m30998| 2015-07-09T13:55:40.994-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:40.996-0400 m31100| 2015-07-09T13:55:40.995-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.020-0400 m31100| 2015-07-09T13:55:41.018-0400 I COMMAND [conn48] command db2.drop_collection1_0 command: create { create: "drop_collection1_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 97526 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 122ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.022-0400 m30998| 2015-07-09T13:55:41.022-0400 I COMMAND [conn15] DROP: db2.drop_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.022-0400 m30998| 2015-07-09T13:55:41.022-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.023-0400 m31100| 2015-07-09T13:55:41.022-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.046-0400 m31100| 2015-07-09T13:55:41.043-0400 I COMMAND [conn50] command db2.drop_collection5_0 command: create { create: "drop_collection5_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 121648 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 146ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.046-0400 m30998| 2015-07-09T13:55:41.045-0400 I COMMAND [conn13] DROP: db2.drop_collection5_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.046-0400 m30998| 2015-07-09T13:55:41.045-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.047-0400 m31100| 2015-07-09T13:55:41.046-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_0
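[editor's note] The collection names encode thread and iteration: drop_collection<tid>_<n> is thread <tid>'s n-th create/drop cycle, and tids 0-9 with suffixes _0 through _4 all appear in this section. A rough sketch of the loop each FSM thread is effectively running (a hypothetical simplification of the drop_collection workload under jstests/concurrency/fsm_workloads/; the real workload drives this through named FSM states):

    var myDB = db.getSiblingDB("db2");
    var tid = 4; // thread id; 0..9 in this run
    for (var i = 0; i < 5; ++i) {
        var collName = "drop_collection" + tid + "_" + i;
        assert.commandWorked(myDB.createCollection(collName)); // logged as "command: create" when slow
        myDB.getCollection(collName).drop(); // logged as "DROP:" on mongos, "CMD: drop" on the shard
    }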
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.068-0400 m31100| 2015-07-09T13:55:41.066-0400 I COMMAND [conn51] command db2.drop_collection0_0 command: create { create: "drop_collection0_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 145552 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 168ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.068-0400 m30999| 2015-07-09T13:55:41.068-0400 I COMMAND [conn15] DROP: db2.drop_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.070-0400 m30999| 2015-07-09T13:55:41.068-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.070-0400 m31100| 2015-07-09T13:55:41.068-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.071-0400 m31100| 2015-07-09T13:55:41.070-0400 I COMMAND [conn41] command db2.drop_collection6_0 command: drop { drop: "drop_collection6_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 162395 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 166ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.075-0400 m31100| 2015-07-09T13:55:41.074-0400 I COMMAND [conn20] command db2.drop_collection8_0 command: drop { drop: "drop_collection8_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 155504 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 159ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.079-0400 m31100| 2015-07-09T13:55:41.078-0400 I COMMAND [conn33] command db2.drop_collection9_0 command: drop { drop: "drop_collection9_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 136689 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 140ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.083-0400 m31100| 2015-07-09T13:55:41.083-0400 I COMMAND [conn45] command db2.drop_collection4_0 command: drop { drop: "drop_collection4_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 127867 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 132ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.087-0400 m31100| 2015-07-09T13:55:41.086-0400 I COMMAND [conn46] command db2.drop_collection3_0 command: drop { drop: "drop_collection3_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 119876 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 123ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.089-0400 m31100| 2015-07-09T13:55:41.087-0400 I COMMAND [conn47] command db2.drop_collection2_0 command: drop { drop: "drop_collection2_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 109314 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.090-0400 m31101| 2015-07-09T13:55:41.090-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection6_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.093-0400 m31102| 2015-07-09T13:55:41.093-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection6_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.094-0400 m31101| 2015-07-09T13:55:41.094-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection8_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.097-0400 m31102| 2015-07-09T13:55:41.096-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection8_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.099-0400 m31101| 2015-07-09T13:55:41.099-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection9_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.100-0400 m31102| 2015-07-09T13:55:41.100-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection9_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.101-0400 m31101| 2015-07-09T13:55:41.101-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection4_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.104-0400 m31102| 2015-07-09T13:55:41.104-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection4_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.105-0400 m30999| 2015-07-09T13:55:41.104-0400 I COMMAND [conn14] DROP: db2.drop_collection6_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.105-0400 m30999| 2015-07-09T13:55:41.104-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.105-0400 m31100| 2015-07-09T13:55:41.105-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.105-0400 m31101| 2015-07-09T13:55:41.105-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection3_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.106-0400 m31101| 2015-07-09T13:55:41.106-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.109-0400 m31101| 2015-07-09T13:55:41.109-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection7_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.109-0400 m31102| 2015-07-09T13:55:41.107-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection3_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.110-0400 m31102| 2015-07-09T13:55:41.109-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.111-0400 m31102| 2015-07-09T13:55:41.111-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection7_0
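[editor's note] In the slow COMMAND entries above, nearly all of the elapsed time is lock queueing rather than work: for the create of db2.drop_collection1_0, timeAcquiringMicros W: 97526 means 97526 µs ≈ 97.5 ms of the 122 ms total was spent waiting for the exclusive (W) lock on the db2 database, leaving roughly 25 ms for the create itself. With ten threads creating and dropping collections in the same database, those database-exclusive acquisitions serialize, which is why the reported durations climb toward ~170 ms as the threads pile up.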
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.112-0400 m31101| 2015-07-09T13:55:41.112-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.115-0400 m31101| 2015-07-09T13:55:41.114-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection5_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.116-0400 m30999| 2015-07-09T13:55:41.115-0400 I COMMAND [conn12] DROP: db2.drop_collection8_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.116-0400 m30999| 2015-07-09T13:55:41.115-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.116-0400 m31100| 2015-07-09T13:55:41.115-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.117-0400 m31102| 2015-07-09T13:55:41.117-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.119-0400 m31101| 2015-07-09T13:55:41.119-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.120-0400 m31102| 2015-07-09T13:55:41.119-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection5_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.121-0400 m31102| 2015-07-09T13:55:41.121-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.129-0400 m30998| 2015-07-09T13:55:41.129-0400 I COMMAND [conn14] DROP: db2.drop_collection9_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.129-0400 m30998| 2015-07-09T13:55:41.129-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.130-0400 m31100| 2015-07-09T13:55:41.129-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.150-0400 m30999| 2015-07-09T13:55:41.149-0400 I COMMAND [conn13] DROP: db2.drop_collection4_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.150-0400 m30999| 2015-07-09T13:55:41.150-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.150-0400 m31100| 2015-07-09T13:55:41.150-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.160-0400 m30998| 2015-07-09T13:55:41.160-0400 I COMMAND [conn12] DROP: db2.drop_collection3_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.161-0400 m30998| 2015-07-09T13:55:41.160-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.161-0400 m31100| 2015-07-09T13:55:41.160-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.182-0400 m30998| 2015-07-09T13:55:41.181-0400 I COMMAND [conn16] DROP: db2.drop_collection7_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.182-0400 m30998| 2015-07-09T13:55:41.181-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.182-0400 m31100| 2015-07-09T13:55:41.181-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.206-0400 m31100| 2015-07-09T13:55:41.205-0400 I COMMAND [conn47] command db2.drop_collection2_1 command: create { create: "drop_collection2_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 88557 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 113ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.206-0400 m30999| 2015-07-09T13:55:41.206-0400 I COMMAND [conn16] DROP: db2.drop_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.206-0400 m30999| 2015-07-09T13:55:41.206-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.207-0400 m31100| 2015-07-09T13:55:41.206-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.226-0400 m31100| 2015-07-09T13:55:41.225-0400 I COMMAND [conn48] command db2.drop_collection1_1 command: create { create: "drop_collection1_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 112186 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 132ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.227-0400 m30998| 2015-07-09T13:55:41.226-0400 I COMMAND [conn15] DROP: db2.drop_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.227-0400 m30998| 2015-07-09T13:55:41.226-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.228-0400 m31100| 2015-07-09T13:55:41.227-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.245-0400 m31100| 2015-07-09T13:55:41.243-0400 I COMMAND [conn50] command db2.drop_collection5_1 command: create { create: "drop_collection5_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 130121 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 148ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.245-0400 m30998| 2015-07-09T13:55:41.244-0400 I COMMAND [conn13] DROP: db2.drop_collection5_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.245-0400 m30998| 2015-07-09T13:55:41.244-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.246-0400 m31100| 2015-07-09T13:55:41.245-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.264-0400 m31100| 2015-07-09T13:55:41.264-0400 I COMMAND [conn51] command db2.drop_collection0_1 command: create { create: "drop_collection0_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148112 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 168ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.265-0400 m30999| 2015-07-09T13:55:41.265-0400 I COMMAND [conn15] DROP: db2.drop_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.265-0400 m30999| 2015-07-09T13:55:41.265-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.266-0400 m31100| 2015-07-09T13:55:41.265-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.268-0400 m31100| 2015-07-09T13:55:41.266-0400 I COMMAND [conn41] command db2.drop_collection6_1 command: drop { drop: "drop_collection6_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 159129 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 161ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.268-0400 m31100| 2015-07-09T13:55:41.267-0400 I COMMAND [conn20] command db2.drop_collection8_1 command: drop { drop: "drop_collection8_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 150782 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.269-0400 m31100| 2015-07-09T13:55:41.268-0400 I COMMAND [conn33] command db2.drop_collection9_1 command: drop { drop: "drop_collection9_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 137491 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 138ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.271-0400 m31100| 2015-07-09T13:55:41.271-0400 I COMMAND [conn45] command db2.drop_collection4_1 command: drop { drop: "drop_collection4_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 117800 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 120ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.273-0400 m31100| 2015-07-09T13:55:41.272-0400 I COMMAND [conn46] command db2.drop_collection3_1 command: drop { drop: "drop_collection3_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 110170 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.302-0400 m30999| 2015-07-09T13:55:41.301-0400 I COMMAND [conn14] DROP: db2.drop_collection6_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.302-0400 m30999| 2015-07-09T13:55:41.301-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.302-0400 m31100| 2015-07-09T13:55:41.301-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.332-0400 m30999| 2015-07-09T13:55:41.331-0400 I COMMAND [conn12] DROP: db2.drop_collection8_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.332-0400 m30999| 2015-07-09T13:55:41.331-0400 I COMMAND [conn12] drop going to do passthrough
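[editor's note] "drop going to do passthrough" is mongos observing that the target collection is not sharded, so the drop is forwarded unchanged to the database's primary shard; that is why every "CMD: drop" above executes on m31100 (test-rs0). A hedged way to confirm db2's primary shard from a shell on one of the routers:

    db.getSiblingDB("config").databases.findOne({ _id: "db2" })
    // => { "_id" : "db2", "primary" : "test-rs0", "partitioned" : true, ... }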
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.332-0400 m31100| 2015-07-09T13:55:41.332-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.335-0400 m31102| 2015-07-09T13:55:41.335-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection6_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.336-0400 m31102| 2015-07-09T13:55:41.336-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection8_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.338-0400 m31101| 2015-07-09T13:55:41.338-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection6_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.338-0400 m31102| 2015-07-09T13:55:41.338-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection9_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.340-0400 m31102| 2015-07-09T13:55:41.340-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection4_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.341-0400 m31101| 2015-07-09T13:55:41.341-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection8_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.342-0400 m31102| 2015-07-09T13:55:41.342-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection3_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.344-0400 m30998| 2015-07-09T13:55:41.343-0400 I COMMAND [conn14] DROP: db2.drop_collection9_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.345-0400 m30998| 2015-07-09T13:55:41.343-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.345-0400 m31101| 2015-07-09T13:55:41.343-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection9_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.345-0400 m31100| 2015-07-09T13:55:41.344-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.345-0400 m31102| 2015-07-09T13:55:41.344-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection7_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.346-0400 m31101| 2015-07-09T13:55:41.346-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection4_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.346-0400 m31102| 2015-07-09T13:55:41.346-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.348-0400 m31101| 2015-07-09T13:55:41.347-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection3_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.350-0400 m31102| 2015-07-09T13:55:41.350-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.351-0400 m31101| 2015-07-09T13:55:41.350-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection7_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.352-0400 m31102| 2015-07-09T13:55:41.351-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection5_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.353-0400 m31101| 2015-07-09T13:55:41.352-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.354-0400 m31102| 2015-07-09T13:55:41.353-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.354-0400 m30999| 2015-07-09T13:55:41.354-0400 I COMMAND [conn13] DROP: db2.drop_collection4_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.354-0400 m30999| 2015-07-09T13:55:41.354-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.354-0400 m31101| 2015-07-09T13:55:41.354-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.355-0400 m31100| 2015-07-09T13:55:41.354-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.358-0400 m31101| 2015-07-09T13:55:41.358-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection5_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.360-0400 m31101| 2015-07-09T13:55:41.360-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.367-0400 m30998| 2015-07-09T13:55:41.367-0400 I COMMAND [conn12] DROP: db2.drop_collection3_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.367-0400 m30998| 2015-07-09T13:55:41.367-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.367-0400 m31100| 2015-07-09T13:55:41.367-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.390-0400 m31100| 2015-07-09T13:55:41.389-0400 I COMMAND [conn49] command db2.drop_collection7_2 command: create { create: "drop_collection7_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 91765 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 115ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.391-0400 m30998| 2015-07-09T13:55:41.390-0400 I COMMAND [conn16] DROP: db2.drop_collection7_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.391-0400 m30998| 2015-07-09T13:55:41.390-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.391-0400 m31100| 2015-07-09T13:55:41.390-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.405-0400 m31100| 2015-07-09T13:55:41.404-0400 I COMMAND [conn47] command db2.drop_collection2_2 command: create { create: "drop_collection2_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 106429 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 121ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.406-0400 m30999| 2015-07-09T13:55:41.405-0400 I COMMAND [conn16] DROP: db2.drop_collection2_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.406-0400 m30999| 2015-07-09T13:55:41.405-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.406-0400 m31100| 2015-07-09T13:55:41.405-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.421-0400 m31100| 2015-07-09T13:55:41.420-0400 I COMMAND [conn48] command db2.drop_collection1_2 command: create { create: "drop_collection1_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 120246 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 135ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.421-0400 m30998| 2015-07-09T13:55:41.421-0400 I COMMAND [conn15] DROP: db2.drop_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.421-0400 m30998| 2015-07-09T13:55:41.421-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.422-0400 m31100| 2015-07-09T13:55:41.421-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.437-0400 m31100| 2015-07-09T13:55:41.436-0400 I COMMAND [conn51] command db2.drop_collection0_2 command: create { create: "drop_collection0_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 135250 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.437-0400 m30999| 2015-07-09T13:55:41.437-0400 I COMMAND [conn15] DROP: db2.drop_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.438-0400 m30999| 2015-07-09T13:55:41.437-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.438-0400 m31100| 2015-07-09T13:55:41.437-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.453-0400 m31100| 2015-07-09T13:55:41.453-0400 I COMMAND [conn50] command db2.drop_collection5_2 command: create { create: "drop_collection5_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151093 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 167ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.454-0400 m31100| 2015-07-09T13:55:41.453-0400 I COMMAND [conn41] command db2.drop_collection6_2 command: drop { drop: "drop_collection6_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151135 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 152ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.455-0400 m30998| 2015-07-09T13:55:41.455-0400 I COMMAND [conn13] DROP: db2.drop_collection5_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.455-0400 m30998| 2015-07-09T13:55:41.455-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.456-0400 m31100| 2015-07-09T13:55:41.455-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.457-0400 m31100| 2015-07-09T13:55:41.456-0400 I COMMAND [conn20] command db2.drop_collection8_2 command: drop { drop: "drop_collection8_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 121871 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 123ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.457-0400 m31100| 2015-07-09T13:55:41.457-0400 I COMMAND [conn33] command db2.drop_collection9_2 command: drop { drop: "drop_collection9_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 111889 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 113ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.460-0400 m31100| 2015-07-09T13:55:41.459-0400 I COMMAND [conn45] command db2.drop_collection4_2 command: drop { drop: "drop_collection4_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 102374 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 104ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.494-0400 m30999| 2015-07-09T13:55:41.493-0400 I COMMAND [conn14] DROP: db2.drop_collection6_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.494-0400 m30999| 2015-07-09T13:55:41.493-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.494-0400 m31100| 2015-07-09T13:55:41.493-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.507-0400 m30999| 2015-07-09T13:55:41.507-0400 I COMMAND [conn12] DROP: db2.drop_collection8_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.508-0400 m30999| 2015-07-09T13:55:41.507-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.508-0400 m31100| 2015-07-09T13:55:41.508-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.534-0400 m30998| 2015-07-09T13:55:41.534-0400 I COMMAND [conn14] DROP: db2.drop_collection9_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.535-0400 m30998| 2015-07-09T13:55:41.534-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.535-0400 m31100| 2015-07-09T13:55:41.534-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.552-0400 m30999| 2015-07-09T13:55:41.552-0400 I COMMAND [conn13] DROP: db2.drop_collection4_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.552-0400 m30999| 2015-07-09T13:55:41.552-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.552-0400 m31100| 2015-07-09T13:55:41.552-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.569-0400 m31101| 2015-07-09T13:55:41.568-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection6_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.570-0400 m31101| 2015-07-09T13:55:41.570-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection8_2
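[editor's note] The m31101/m31102 "repl writer worker N" lines are the two secondaries of test-rs0 applying the same drops from the primary's oplog, a couple of hundred milliseconds behind (compare the _1-suffix drops on m31100 around 13:55:41.10 with their replays here around 13:55:41.34). From a shell connected to the m31100 primary, lag can be eyeballed with:

    rs.printSlaveReplicationInfo() // per-secondary "... behind the primary" summary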
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.572-0400 m31100| 2015-07-09T13:55:41.570-0400 I COMMAND [conn46] command db2.drop_collection3_3 command: create { create: "drop_collection3_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 88918 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 108ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.572-0400 m30998| 2015-07-09T13:55:41.571-0400 I COMMAND [conn12] DROP: db2.drop_collection3_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.573-0400 m30998| 2015-07-09T13:55:41.571-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.573-0400 m31100| 2015-07-09T13:55:41.572-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.573-0400 m31101| 2015-07-09T13:55:41.572-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection9_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.576-0400 m31102| 2015-07-09T13:55:41.575-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection6_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.576-0400 m31101| 2015-07-09T13:55:41.576-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection4_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.578-0400 m31102| 2015-07-09T13:55:41.578-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection8_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.578-0400 m31101| 2015-07-09T13:55:41.578-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection3_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.580-0400 m31102| 2015-07-09T13:55:41.579-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection9_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.580-0400 m31101| 2015-07-09T13:55:41.580-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection7_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.582-0400 m31102| 2015-07-09T13:55:41.581-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection4_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.584-0400 m31100| 2015-07-09T13:55:41.582-0400 I COMMAND [conn49] command db2.drop_collection7_3 command: create { create: "drop_collection7_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 107601 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.584-0400 m30998| 2015-07-09T13:55:41.583-0400 I COMMAND [conn16] DROP: db2.drop_collection7_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.584-0400 m30998| 2015-07-09T13:55:41.583-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.584-0400 m31101| 2015-07-09T13:55:41.583-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection2_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.585-0400 m31100| 2015-07-09T13:55:41.584-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.585-0400 m31101| 2015-07-09T13:55:41.585-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.585-0400 m31102| 2015-07-09T13:55:41.585-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection3_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.587-0400 m31101| 2015-07-09T13:55:41.587-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.588-0400 m31102| 2015-07-09T13:55:41.587-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection7_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.589-0400 m31101| 2015-07-09T13:55:41.589-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection5_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.590-0400 m31102| 2015-07-09T13:55:41.590-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection2_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.591-0400 m31102| 2015-07-09T13:55:41.591-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.594-0400 m31102| 2015-07-09T13:55:41.593-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.596-0400 m31100| 2015-07-09T13:55:41.595-0400 I COMMAND [conn47] command db2.drop_collection2_3 command: create { create: "drop_collection2_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 119857 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 130ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.596-0400 m30999| 2015-07-09T13:55:41.595-0400 I COMMAND [conn16] DROP: db2.drop_collection2_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.597-0400 m31100| 2015-07-09T13:55:41.596-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.597-0400 m30999| 2015-07-09T13:55:41.595-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.598-0400 m31102| 2015-07-09T13:55:41.598-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection5_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.611-0400 m31100| 2015-07-09T13:55:41.611-0400 I COMMAND [conn48] command db2.drop_collection1_3 command: create { create: "drop_collection1_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 129065 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 145ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.612-0400 m30998| 2015-07-09T13:55:41.612-0400 I COMMAND [conn15] DROP: db2.drop_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.612-0400 m31100| 2015-07-09T13:55:41.612-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.612-0400 m30998| 2015-07-09T13:55:41.612-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.643-0400 m31100| 2015-07-09T13:55:41.641-0400 I COMMAND [conn51] command db2.drop_collection0_3 command: create { create: "drop_collection0_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 139536 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 169ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.643-0400 m30999| 2015-07-09T13:55:41.643-0400 I COMMAND [conn15] DROP: db2.drop_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.643-0400 m30999| 2015-07-09T13:55:41.643-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.644-0400 m31100| 2015-07-09T13:55:41.643-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.658-0400 m31100| 2015-07-09T13:55:41.657-0400 I COMMAND [conn50] command db2.drop_collection5_3 command: create { create: "drop_collection5_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 163941 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 179ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.660-0400 m31100| 2015-07-09T13:55:41.658-0400 I COMMAND [conn41] command db2.drop_collection6_3 command: drop { drop: "drop_collection6_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 163080 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 164ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.661-0400 m30998| 2015-07-09T13:55:41.660-0400 I COMMAND [conn13] DROP: db2.drop_collection5_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.661-0400 m30998| 2015-07-09T13:55:41.660-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.661-0400 m31100| 2015-07-09T13:55:41.660-0400 I COMMAND [conn20] command db2.drop_collection8_3 command: drop { drop: "drop_collection8_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 150153 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 152ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.662-0400 m31100| 2015-07-09T13:55:41.661-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_3
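[editor's note] Only the creates and drops that exceed mongod's slow-operation threshold (100 ms by default) are echoed as full COMMAND lines with lock statistics; faster ones appear only as one-line "CMD: drop" entries. The threshold can be inspected or tuned per database from the shell:

    db.getProfilingStatus() // => { "was" : 0, "slowms" : 100 }
    db.setProfilingLevel(0, 50) // keep the profiler off but log operations slower than 50 ms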
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.662-0400 m31100| 2015-07-09T13:55:41.661-0400 I COMMAND [conn33] command db2.drop_collection9_3 command: drop { drop: "drop_collection9_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 125621 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 126ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.667-0400 m31100| 2015-07-09T13:55:41.666-0400 I COMMAND [conn45] command db2.drop_collection4_3 command: drop { drop: "drop_collection4_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 109114 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 114ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.702-0400 m30999| 2015-07-09T13:55:41.701-0400 I COMMAND [conn14] DROP: db2.drop_collection6_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.702-0400 m31100| 2015-07-09T13:55:41.702-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.703-0400 m30999| 2015-07-09T13:55:41.702-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.722-0400 m30999| 2015-07-09T13:55:41.722-0400 I COMMAND [conn12] DROP: db2.drop_collection8_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.723-0400 m30999| 2015-07-09T13:55:41.722-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.723-0400 m31100| 2015-07-09T13:55:41.722-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.739-0400 m30998| 2015-07-09T13:55:41.739-0400 I COMMAND [conn14] DROP: db2.drop_collection9_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.740-0400 m30998| 2015-07-09T13:55:41.739-0400 I COMMAND [conn14] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.740-0400 m31100| 2015-07-09T13:55:41.739-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.761-0400 m30998| 2015-07-09T13:55:41.760-0400 I COMMAND [conn12] DROP: db2.drop_collection3_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.761-0400 m30998| 2015-07-09T13:55:41.760-0400 I COMMAND [conn12] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.761-0400 m31100| 2015-07-09T13:55:41.760-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.785-0400 m31101| 2015-07-09T13:55:41.785-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection6_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.788-0400 m31100| 2015-07-09T13:55:41.787-0400 I COMMAND [conn45] command db2.drop_collection4_4 command: create { create: "drop_collection4_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 90821 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 118ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.789-0400 m30999| 2015-07-09T13:55:41.788-0400 I COMMAND [conn13] DROP: db2.drop_collection4_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.789-0400 m30999| 2015-07-09T13:55:41.789-0400 I COMMAND [conn13] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.789-0400 m31100| 2015-07-09T13:55:41.789-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.791-0400 m31101| 2015-07-09T13:55:41.791-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection8_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.794-0400 m31101| 2015-07-09T13:55:41.793-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection9_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.796-0400 m31101| 2015-07-09T13:55:41.796-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection4_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.799-0400 m31101| 2015-07-09T13:55:41.799-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection3_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.801-0400 m31102| 2015-07-09T13:55:41.801-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection6_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.804-0400 m31101| 2015-07-09T13:55:41.803-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection7_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.806-0400 m31102| 2015-07-09T13:55:41.805-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection8_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.806-0400 m31100| 2015-07-09T13:55:41.805-0400 I COMMAND [conn49] command db2.drop_collection7_4 command: create { create: "drop_collection7_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 114733 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 132ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.807-0400 m30998| 2015-07-09T13:55:41.806-0400 I COMMAND [conn16] DROP: db2.drop_collection7_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.807-0400 m30998| 2015-07-09T13:55:41.807-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.807-0400 m31100| 2015-07-09T13:55:41.807-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.807-0400 m31101| 2015-07-09T13:55:41.807-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection2_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.808-0400 m31102| 2015-07-09T13:55:41.808-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection9_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.810-0400 m31101| 2015-07-09T13:55:41.810-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.811-0400 m31102| 2015-07-09T13:55:41.811-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection4_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.812-0400 m31101| 2015-07-09T13:55:41.811-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.814-0400 m31100| 2015-07-09T13:55:41.813-0400 I COMMAND [conn47] command db2.drop_collection2_4 command: create { create: "drop_collection2_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 130204 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 138ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.814-0400 m30999| 2015-07-09T13:55:41.814-0400 I COMMAND [conn16] DROP: db2.drop_collection2_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.815-0400 m30999| 2015-07-09T13:55:41.814-0400 I COMMAND [conn16] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.816-0400 m31100| 2015-07-09T13:55:41.814-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.816-0400 m31102| 2015-07-09T13:55:41.816-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection3_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.818-0400 m31102| 2015-07-09T13:55:41.818-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection7_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.820-0400 m31102| 2015-07-09T13:55:41.819-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection2_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.822-0400 m31102| 2015-07-09T13:55:41.822-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.825-0400 m31102| 2015-07-09T13:55:41.824-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.827-0400 m31100| 2015-07-09T13:55:41.825-0400 I COMMAND [conn48] command db2.drop_collection1_4 command: create { create: "drop_collection1_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 137279 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 149ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.828-0400 m30998| 2015-07-09T13:55:41.827-0400 I COMMAND [conn15] DROP: db2.drop_collection1_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.828-0400 m30998| 2015-07-09T13:55:41.827-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.828-0400 m31100| 2015-07-09T13:55:41.827-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.833-0400 m31101| 2015-07-09T13:55:41.833-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection5_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.843-0400 m31100| 2015-07-09T13:55:41.842-0400 I COMMAND [conn51] command db2.drop_collection0_4 command: create { create: "drop_collection0_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148471 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 165ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.844-0400 m30999| 2015-07-09T13:55:41.844-0400 I COMMAND [conn15] DROP: db2.drop_collection0_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.844-0400 m30999| 2015-07-09T13:55:41.844-0400 I COMMAND [conn15] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.844-0400 m31100| 2015-07-09T13:55:41.844-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.854-0400 m31100| 2015-07-09T13:55:41.853-0400 I COMMAND [conn41] command db2.drop_collection6_4 command: drop { drop: "drop_collection6_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 140603 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.854-0400 m31102| 2015-07-09T13:55:41.854-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection5_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.876-0400 m31100| 2015-07-09T13:55:41.874-0400 I COMMAND [conn50] command db2.drop_collection5_4 command: create { create: "drop_collection5_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149330 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 170ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.876-0400 m30998| 2015-07-09T13:55:41.875-0400 I COMMAND [conn13] DROP: db2.drop_collection5_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.876-0400 m30998| 2015-07-09T13:55:41.875-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.877-0400 m31100| 2015-07-09T13:55:41.875-0400 I COMMAND [conn20] command db2.drop_collection8_4 command: drop { drop: "drop_collection8_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151628 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.877-0400 m31100| 2015-07-09T13:55:41.876-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.878-0400 m31100| 2015-07-09T13:55:41.876-0400 I COMMAND [conn33] command db2.drop_collection9_4 command: drop { drop: "drop_collection9_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 135777 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.880-0400 m31100| 2015-07-09T13:55:41.880-0400 I COMMAND [conn46] command db2.drop_collection3_4 command: drop { drop: "drop_collection3_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 115184 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.923-0400 m30999| 2015-07-09T13:55:41.923-0400 I COMMAND [conn14] DROP: db2.drop_collection6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.924-0400 m30999| 2015-07-09T13:55:41.923-0400 I 
COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.924-0400 m31100| 2015-07-09T13:55:41.923-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.948-0400 m30998| 2015-07-09T13:55:41.948-0400 I COMMAND [conn14] DROP: db2.drop_collection9_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.949-0400 m30998| 2015-07-09T13:55:41.948-0400 I COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.949-0400 m31100| 2015-07-09T13:55:41.948-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.968-0400 m30999| 2015-07-09T13:55:41.967-0400 I COMMAND [conn12] DROP: db2.drop_collection8_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.968-0400 m30999| 2015-07-09T13:55:41.968-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.969-0400 m31100| 2015-07-09T13:55:41.968-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.993-0400 m31100| 2015-07-09T13:55:41.992-0400 I COMMAND [conn46] command db2.drop_collection3_5 command: create { create: "drop_collection3_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 85841 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 111ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.993-0400 m30998| 2015-07-09T13:55:41.993-0400 I COMMAND [conn12] DROP: db2.drop_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.993-0400 m30998| 2015-07-09T13:55:41.993-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:41.994-0400 m31100| 2015-07-09T13:55:41.993-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.006-0400 m31102| 2015-07-09T13:55:42.005-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.009-0400 m31100| 2015-07-09T13:55:42.007-0400 I COMMAND [conn45] command db2.drop_collection4_5 command: create { create: "drop_collection4_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 104604 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.010-0400 m30999| 2015-07-09T13:55:42.010-0400 I COMMAND [conn13] DROP: db2.drop_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.010-0400 m30999| 2015-07-09T13:55:42.010-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.010-0400 m31100| 2015-07-09T13:55:42.010-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.023-0400 m31102| 2015-07-09T13:55:42.022-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection8_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.028-0400 m31100| 
2015-07-09T13:55:42.026-0400 I COMMAND [conn49] command db2.drop_collection7_5 command: create { create: "drop_collection7_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 118882 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.028-0400 m31102| 2015-07-09T13:55:42.027-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection9_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.028-0400 m30998| 2015-07-09T13:55:42.028-0400 I COMMAND [conn16] DROP: db2.drop_collection7_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.029-0400 m30998| 2015-07-09T13:55:42.028-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.029-0400 m31100| 2015-07-09T13:55:42.028-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.029-0400 m31102| 2015-07-09T13:55:42.029-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection3_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.032-0400 m31102| 2015-07-09T13:55:42.032-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection4_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.036-0400 m31101| 2015-07-09T13:55:42.036-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.039-0400 m31102| 2015-07-09T13:55:42.038-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection7_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.041-0400 m31100| 2015-07-09T13:55:42.040-0400 I COMMAND [conn47] command db2.drop_collection2_5 command: create { create: "drop_collection2_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 131887 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.041-0400 m31102| 2015-07-09T13:55:42.040-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection2_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.042-0400 m30999| 2015-07-09T13:55:42.042-0400 I COMMAND [conn16] DROP: db2.drop_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.042-0400 m30999| 2015-07-09T13:55:42.042-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.042-0400 m31102| 2015-07-09T13:55:42.042-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection1_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.043-0400 m31100| 2015-07-09T13:55:42.042-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.045-0400 m31102| 2015-07-09T13:55:42.044-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection0_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.050-0400 m31101| 2015-07-09T13:55:42.049-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection8_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.053-0400 m31101| 2015-07-09T13:55:42.053-0400 I 
COMMAND [repl writer worker 10] CMD: drop db2.drop_collection9_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.056-0400 m31100| 2015-07-09T13:55:42.054-0400 I COMMAND [conn48] command db2.drop_collection1_5 command: create { create: "drop_collection1_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 139572 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 154ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.056-0400 m31101| 2015-07-09T13:55:42.055-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection3_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.056-0400 m30998| 2015-07-09T13:55:42.056-0400 I COMMAND [conn15] DROP: db2.drop_collection1_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.056-0400 m30998| 2015-07-09T13:55:42.056-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.057-0400 m31100| 2015-07-09T13:55:42.056-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.057-0400 m31101| 2015-07-09T13:55:42.057-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection4_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.065-0400 m31101| 2015-07-09T13:55:42.065-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection7_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.067-0400 m31102| 2015-07-09T13:55:42.066-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection5_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.069-0400 m31100| 2015-07-09T13:55:42.067-0400 I COMMAND [conn51] command db2.drop_collection0_5 command: create { create: "drop_collection0_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 153272 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.069-0400 m30999| 2015-07-09T13:55:42.068-0400 I COMMAND [conn15] DROP: db2.drop_collection0_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.069-0400 m30999| 2015-07-09T13:55:42.068-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.069-0400 m31100| 2015-07-09T13:55:42.068-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.070-0400 m31100| 2015-07-09T13:55:42.068-0400 I COMMAND [conn41] command db2.drop_collection6_5 command: drop { drop: "drop_collection6_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 143300 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.070-0400 m31101| 2015-07-09T13:55:42.070-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection2_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.072-0400 m31101| 2015-07-09T13:55:42.071-0400 I COMMAND 
[repl writer worker 3] CMD: drop db2.drop_collection1_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.076-0400 m31101| 2015-07-09T13:55:42.075-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection0_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.079-0400 m31100| 2015-07-09T13:55:42.077-0400 I COMMAND [conn50] command db2.drop_collection5_5 command: create { create: "drop_collection5_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 142626 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.080-0400 m30998| 2015-07-09T13:55:42.079-0400 I COMMAND [conn13] DROP: db2.drop_collection5_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.080-0400 m30998| 2015-07-09T13:55:42.080-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.080-0400 m31100| 2015-07-09T13:55:42.080-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.081-0400 m31100| 2015-07-09T13:55:42.080-0400 I COMMAND [conn33] command db2.drop_collection9_5 command: drop { drop: "drop_collection9_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 129430 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.082-0400 m31100| 2015-07-09T13:55:42.081-0400 I COMMAND [conn20] command db2.drop_collection8_5 command: drop { drop: "drop_collection8_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 112153 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.098-0400 m31101| 2015-07-09T13:55:42.098-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection5_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.120-0400 m31100| 2015-07-09T13:55:42.119-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.121-0400 m30999| 2015-07-09T13:55:42.119-0400 I COMMAND [conn14] DROP: db2.drop_collection6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.121-0400 m30999| 2015-07-09T13:55:42.119-0400 I COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.143-0400 m30998| 2015-07-09T13:55:42.143-0400 I COMMAND [conn14] DROP: db2.drop_collection9_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.143-0400 m30998| 2015-07-09T13:55:42.143-0400 I COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.143-0400 m31100| 2015-07-09T13:55:42.143-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.176-0400 m30999| 2015-07-09T13:55:42.176-0400 I COMMAND [conn12] DROP: db2.drop_collection8_6 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.176-0400 m30999| 2015-07-09T13:55:42.176-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.177-0400 m31100| 2015-07-09T13:55:42.176-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.194-0400 m31100| 2015-07-09T13:55:42.193-0400 I COMMAND [conn46] command db2.drop_collection3_6 command: create { create: "drop_collection3_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 91783 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.195-0400 m30998| 2015-07-09T13:55:42.194-0400 I COMMAND [conn12] DROP: db2.drop_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.195-0400 m30998| 2015-07-09T13:55:42.194-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.195-0400 m31100| 2015-07-09T13:55:42.194-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.213-0400 m31100| 2015-07-09T13:55:42.212-0400 I COMMAND [conn45] command db2.drop_collection4_6 command: create { create: "drop_collection4_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 104938 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.214-0400 m30999| 2015-07-09T13:55:42.213-0400 I COMMAND [conn13] DROP: db2.drop_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.214-0400 m31100| 2015-07-09T13:55:42.213-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.214-0400 m30999| 2015-07-09T13:55:42.213-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.233-0400 m31100| 2015-07-09T13:55:42.232-0400 I COMMAND [conn49] command db2.drop_collection7_6 command: create { create: "drop_collection7_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 121942 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.233-0400 m30998| 2015-07-09T13:55:42.233-0400 I COMMAND [conn16] DROP: db2.drop_collection7_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.233-0400 m30998| 2015-07-09T13:55:42.233-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.233-0400 m31100| 2015-07-09T13:55:42.233-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.237-0400 m31102| 2015-07-09T13:55:42.237-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.250-0400 m31100| 
2015-07-09T13:55:42.249-0400 I COMMAND [conn47] command db2.drop_collection2_6 command: create { create: "drop_collection2_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 140519 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.250-0400 m30999| 2015-07-09T13:55:42.250-0400 I COMMAND [conn16] DROP: db2.drop_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.251-0400 m30999| 2015-07-09T13:55:42.250-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.251-0400 m31100| 2015-07-09T13:55:42.250-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.258-0400 m31102| 2015-07-09T13:55:42.258-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection9_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.269-0400 m31102| 2015-07-09T13:55:42.269-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection8_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.271-0400 m31101| 2015-07-09T13:55:42.269-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.272-0400 m31100| 2015-07-09T13:55:42.270-0400 I COMMAND [conn48] command db2.drop_collection1_6 command: create { create: "drop_collection1_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 152317 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.272-0400 m30998| 2015-07-09T13:55:42.272-0400 I COMMAND [conn15] DROP: db2.drop_collection1_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.272-0400 m30998| 2015-07-09T13:55:42.272-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.272-0400 m31100| 2015-07-09T13:55:42.272-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.273-0400 m31102| 2015-07-09T13:55:42.273-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.275-0400 m31102| 2015-07-09T13:55:42.274-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.276-0400 m31102| 2015-07-09T13:55:42.276-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection7_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.280-0400 m31102| 2015-07-09T13:55:42.279-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.282-0400 m31102| 2015-07-09T13:55:42.282-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection1_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.285-0400 m31100| 2015-07-09T13:55:42.284-0400 I COMMAND [conn51] command db2.drop_collection0_6 command: create { create: "drop_collection0_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 
locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 171548 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 185ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.285-0400 m31102| 2015-07-09T13:55:42.285-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection0_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.286-0400 m30999| 2015-07-09T13:55:42.285-0400 I COMMAND [conn15] DROP: db2.drop_collection0_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.286-0400 m30999| 2015-07-09T13:55:42.285-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.286-0400 m31100| 2015-07-09T13:55:42.286-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.287-0400 m31100| 2015-07-09T13:55:42.287-0400 I COMMAND [conn41] command db2.drop_collection6_6 command: drop { drop: "drop_collection6_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 164586 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 167ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.292-0400 m31101| 2015-07-09T13:55:42.292-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection9_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.295-0400 m31101| 2015-07-09T13:55:42.293-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection8_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.295-0400 m31101| 2015-07-09T13:55:42.295-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.302-0400 m31101| 2015-07-09T13:55:42.302-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.303-0400 m31100| 2015-07-09T13:55:42.302-0400 I COMMAND [conn50] command db2.drop_collection5_6 command: create { create: "drop_collection5_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 165650 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 180ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.304-0400 m31100| 2015-07-09T13:55:42.303-0400 I COMMAND [conn33] command db2.drop_collection9_6 command: drop { drop: "drop_collection9_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 158818 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.305-0400 m30998| 2015-07-09T13:55:42.304-0400 I COMMAND [conn13] DROP: db2.drop_collection5_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.305-0400 m30998| 2015-07-09T13:55:42.304-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.305-0400 m31100| 
2015-07-09T13:55:42.304-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.306-0400 m31100| 2015-07-09T13:55:42.305-0400 I COMMAND [conn20] command db2.drop_collection8_6 command: drop { drop: "drop_collection8_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 126721 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.307-0400 m31102| 2015-07-09T13:55:42.306-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection5_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.307-0400 m31100| 2015-07-09T13:55:42.306-0400 I COMMAND [conn46] command db2.drop_collection3_6 command: drop { drop: "drop_collection3_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 111027 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 111ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.307-0400 m31101| 2015-07-09T13:55:42.307-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection7_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.308-0400 m31101| 2015-07-09T13:55:42.308-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.310-0400 m31101| 2015-07-09T13:55:42.310-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection1_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.312-0400 m31101| 2015-07-09T13:55:42.312-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection0_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.330-0400 m30999| 2015-07-09T13:55:42.329-0400 I COMMAND [conn14] DROP: db2.drop_collection6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.330-0400 m30999| 2015-07-09T13:55:42.330-0400 I COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.331-0400 m31100| 2015-07-09T13:55:42.330-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.348-0400 m30998| 2015-07-09T13:55:42.347-0400 I COMMAND [conn14] DROP: db2.drop_collection9_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.348-0400 m30998| 2015-07-09T13:55:42.347-0400 I COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.348-0400 m31100| 2015-07-09T13:55:42.348-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.349-0400 m31101| 2015-07-09T13:55:42.349-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection5_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.366-0400 m30999| 2015-07-09T13:55:42.365-0400 I COMMAND [conn12] DROP: db2.drop_collection8_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.366-0400 m30999| 2015-07-09T13:55:42.365-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.366-0400 m31100| 2015-07-09T13:55:42.365-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_7 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.389-0400 m30998| 2015-07-09T13:55:42.389-0400 I COMMAND [conn12] DROP: db2.drop_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.390-0400 m30998| 2015-07-09T13:55:42.389-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.390-0400 m31100| 2015-07-09T13:55:42.389-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.407-0400 m30999| 2015-07-09T13:55:42.406-0400 I COMMAND [conn13] DROP: db2.drop_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.407-0400 m30999| 2015-07-09T13:55:42.406-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.407-0400 m31100| 2015-07-09T13:55:42.407-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.420-0400 m31100| 2015-07-09T13:55:42.419-0400 I COMMAND [conn49] command db2.drop_collection7_7 command: create { create: "drop_collection7_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 95900 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.421-0400 m30998| 2015-07-09T13:55:42.420-0400 I COMMAND [conn16] DROP: db2.drop_collection7_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.421-0400 m30998| 2015-07-09T13:55:42.420-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.421-0400 m31100| 2015-07-09T13:55:42.421-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.444-0400 m31100| 2015-07-09T13:55:42.442-0400 I COMMAND [conn47] command db2.drop_collection2_7 command: create { create: "drop_collection2_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 108458 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.444-0400 m30999| 2015-07-09T13:55:42.443-0400 I COMMAND [conn16] DROP: db2.drop_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.444-0400 m30999| 2015-07-09T13:55:42.443-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.444-0400 m31100| 2015-07-09T13:55:42.444-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.468-0400 m31100| 2015-07-09T13:55:42.467-0400 I COMMAND [conn48] command db2.drop_collection1_7 command: create { create: "drop_collection1_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 130766 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.468-0400 m30998| 2015-07-09T13:55:42.468-0400 I 
COMMAND [conn15] DROP: db2.drop_collection1_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.469-0400 m30998| 2015-07-09T13:55:42.468-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.469-0400 m31100| 2015-07-09T13:55:42.468-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.470-0400 m31102| 2015-07-09T13:55:42.470-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.493-0400 m31100| 2015-07-09T13:55:42.492-0400 I COMMAND [conn51] command db2.drop_collection0_7 command: create { create: "drop_collection0_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 153160 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.494-0400 m30999| 2015-07-09T13:55:42.493-0400 I COMMAND [conn15] DROP: db2.drop_collection0_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.494-0400 m30999| 2015-07-09T13:55:42.493-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.494-0400 m31100| 2015-07-09T13:55:42.494-0400 I COMMAND [conn51] CMD: drop db2.drop_collection0_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.495-0400 m31100| 2015-07-09T13:55:42.494-0400 I COMMAND [conn41] command db2.drop_collection6_7 command: drop { drop: "drop_collection6_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 162514 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 164ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.496-0400 m31102| 2015-07-09T13:55:42.496-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection9_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.498-0400 m31100| 2015-07-09T13:55:42.497-0400 I COMMAND [conn33] command db2.drop_collection9_7 command: drop { drop: "drop_collection9_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 145745 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.499-0400 m31102| 2015-07-09T13:55:42.497-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection8_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.510-0400 m31102| 2015-07-09T13:55:42.510-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.515-0400 m31100| 2015-07-09T13:55:42.513-0400 I COMMAND [conn50] command db2.drop_collection5_7 command: create { create: "drop_collection5_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148453 } }, Metadata: { acquireCount: 
{ w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.516-0400 m31100| 2015-07-09T13:55:42.515-0400 I COMMAND [conn20] command db2.drop_collection8_7 command: drop { drop: "drop_collection8_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148321 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.517-0400 m30998| 2015-07-09T13:55:42.514-0400 I COMMAND [conn13] DROP: db2.drop_collection5_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.517-0400 m30998| 2015-07-09T13:55:42.515-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.518-0400 m31100| 2015-07-09T13:55:42.515-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.519-0400 m31100| 2015-07-09T13:55:42.515-0400 I COMMAND [conn46] command db2.drop_collection3_7 command: drop { drop: "drop_collection3_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 125337 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.519-0400 m31102| 2015-07-09T13:55:42.517-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.520-0400 m31100| 2015-07-09T13:55:42.518-0400 I COMMAND [conn45] command db2.drop_collection4_7 command: drop { drop: "drop_collection4_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 108950 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 111ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.520-0400 m31101| 2015-07-09T13:55:42.519-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.521-0400 m31102| 2015-07-09T13:55:42.520-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection7_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.523-0400 m31102| 2015-07-09T13:55:42.523-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.524-0400 m31102| 2015-07-09T13:55:42.524-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection1_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.529-0400 m31102| 2015-07-09T13:55:42.528-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection0_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.538-0400 m31101| 2015-07-09T13:55:42.538-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection9_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.540-0400 m31101| 2015-07-09T13:55:42.539-0400 I COMMAND [repl writer worker 13] CMD: drop db2.drop_collection8_6 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:42.540-0400 m30999| 2015-07-09T13:55:42.540-0400 I COMMAND [conn14] DROP: db2.drop_collection6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.541-0400 m30999| 2015-07-09T13:55:42.540-0400 I COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.541-0400 m31100| 2015-07-09T13:55:42.540-0400 I COMMAND [conn41] CMD: drop db2.drop_collection6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.542-0400 m31101| 2015-07-09T13:55:42.541-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.547-0400 m31101| 2015-07-09T13:55:42.546-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.549-0400 m31101| 2015-07-09T13:55:42.548-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection7_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.550-0400 m31101| 2015-07-09T13:55:42.550-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.554-0400 m31101| 2015-07-09T13:55:42.553-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection1_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.556-0400 m30998| 2015-07-09T13:55:42.555-0400 I COMMAND [conn14] DROP: db2.drop_collection9_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.556-0400 m30998| 2015-07-09T13:55:42.555-0400 I COMMAND [conn14] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.556-0400 m31100| 2015-07-09T13:55:42.556-0400 I COMMAND [conn33] CMD: drop db2.drop_collection9_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.558-0400 m31101| 2015-07-09T13:55:42.557-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection0_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.568-0400 m31102| 2015-07-09T13:55:42.568-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection5_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.578-0400 m30999| 2015-07-09T13:55:42.577-0400 I COMMAND [conn12] DROP: db2.drop_collection8_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.578-0400 m30999| 2015-07-09T13:55:42.578-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.578-0400 m31100| 2015-07-09T13:55:42.578-0400 I COMMAND [conn20] CMD: drop db2.drop_collection8_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.599-0400 m30998| 2015-07-09T13:55:42.598-0400 I COMMAND [conn12] DROP: db2.drop_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.599-0400 m30998| 2015-07-09T13:55:42.598-0400 I COMMAND [conn12] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.599-0400 m31100| 2015-07-09T13:55:42.598-0400 I COMMAND [conn46] CMD: drop db2.drop_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.618-0400 m31101| 2015-07-09T13:55:42.617-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection5_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.618-0400 m30999| 2015-07-09T13:55:42.618-0400 I COMMAND [conn13] DROP: db2.drop_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.619-0400 m30999| 2015-07-09T13:55:42.618-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.619-0400 m31100| 
2015-07-09T13:55:42.618-0400 I COMMAND [conn45] CMD: drop db2.drop_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.632-0400 m31100| 2015-07-09T13:55:42.630-0400 I COMMAND [conn49] command db2.drop_collection7_8 command: create { create: "drop_collection7_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 96677 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.632-0400 m31100| 2015-07-09T13:55:42.631-0400 I COMMAND [conn49] CMD: drop db2.drop_collection7_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.632-0400 m30998| 2015-07-09T13:55:42.631-0400 I COMMAND [conn16] DROP: db2.drop_collection7_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.633-0400 m30998| 2015-07-09T13:55:42.631-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.651-0400 m31100| 2015-07-09T13:55:42.650-0400 I COMMAND [conn47] command db2.drop_collection2_8 command: create { create: "drop_collection2_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 107729 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.651-0400 m30999| 2015-07-09T13:55:42.651-0400 I COMMAND [conn16] DROP: db2.drop_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.652-0400 m30999| 2015-07-09T13:55:42.651-0400 I COMMAND [conn16] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.652-0400 m31100| 2015-07-09T13:55:42.651-0400 I COMMAND [conn47] CMD: drop db2.drop_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.674-0400 m31100| 2015-07-09T13:55:42.673-0400 I COMMAND [conn48] command db2.drop_collection1_8 command: create { create: "drop_collection1_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 127471 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.674-0400 m30998| 2015-07-09T13:55:42.674-0400 I COMMAND [conn15] DROP: db2.drop_collection1_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.674-0400 m30998| 2015-07-09T13:55:42.674-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.675-0400 m31100| 2015-07-09T13:55:42.674-0400 I COMMAND [conn48] CMD: drop db2.drop_collection1_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.698-0400 m31100| 2015-07-09T13:55:42.698-0400 I COMMAND [conn51] command db2.drop_collection0_8 command: create { create: "drop_collection0_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148034 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { 
acquireCount: { w: 1 } } } protocol:op_command 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.700-0400 m31100| 2015-07-09T13:55:42.698-0400 I COMMAND [conn41] command db2.drop_collection6_8 command: drop { drop: "drop_collection6_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 157263 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.701-0400 m30999| 2015-07-09T13:55:42.699-0400 I COMMAND [conn15] DROP: db2.drop_collection0_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.701-0400 m30999| 2015-07-09T13:55:42.699-0400 I COMMAND [conn15] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.701-0400 m31100| 2015-07-09T13:55:42.700-0400 I COMMAND [conn33] command db2.drop_collection9_8 command: drop { drop: "drop_collection9_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 142794 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.702-0400 m31100| 2015-07-09T13:55:42.701-0400 I COMMAND [conn41] CMD: drop db2.drop_collection0_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.703-0400 m30999| 2015-07-09T13:55:42.703-0400 I NETWORK [conn14] end connection 127.0.0.1:62657 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.705-0400 m30998| 2015-07-09T13:55:42.704-0400 I NETWORK [conn14] end connection 127.0.0.1:62655 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.714-0400 m31100| 2015-07-09T13:55:42.713-0400 I COMMAND [conn50] command db2.drop_collection5_8 command: create { create: "drop_collection5_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 143140 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 156ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.714-0400 m30998| 2015-07-09T13:55:42.714-0400 I COMMAND [conn13] DROP: db2.drop_collection5_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.715-0400 m30998| 2015-07-09T13:55:42.714-0400 I COMMAND [conn13] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.715-0400 m31100| 2015-07-09T13:55:42.714-0400 I COMMAND [conn50] CMD: drop db2.drop_collection5_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.720-0400 m31100| 2015-07-09T13:55:42.715-0400 I COMMAND [conn20] command db2.drop_collection8_8 command: drop { drop: "drop_collection8_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 136386 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.720-0400 m31100| 
2015-07-09T13:55:42.716-0400 I COMMAND [conn46] command db2.drop_collection3_8 command: drop { drop: "drop_collection3_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:130 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 116225 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 117ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.722-0400 m30998| 2015-07-09T13:55:42.721-0400 I NETWORK [conn12] end connection 127.0.0.1:62653 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.730-0400 m30999| 2015-07-09T13:55:42.726-0400 I NETWORK [conn12] end connection 127.0.0.1:62652 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.741-0400 m30998| 2015-07-09T13:55:42.737-0400 I NETWORK [conn16] end connection 127.0.0.1:62659 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.741-0400 m30998| 2015-07-09T13:55:42.738-0400 I NETWORK [conn15] end connection 127.0.0.1:62658 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.743-0400 m31102| 2015-07-09T13:55:42.740-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.746-0400 m30999| 2015-07-09T13:55:42.743-0400 I NETWORK [conn16] end connection 127.0.0.1:62661 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.746-0400 m30999| 2015-07-09T13:55:42.743-0400 I NETWORK [conn13] end connection 127.0.0.1:62656 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.746-0400 m30999| 2015-07-09T13:55:42.743-0400 I NETWORK [conn15] end connection 127.0.0.1:62660 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.746-0400 m31102| 2015-07-09T13:55:42.743-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection9_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.746-0400 m30998| 2015-07-09T13:55:42.745-0400 I NETWORK [conn13] end connection 127.0.0.1:62654 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.772-0400 m31102| 2015-07-09T13:55:42.771-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection8_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.775-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.775-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.775-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.776-0400 jstests/concurrency/fsm_workloads/drop_collection.js: Workload completed in 2031 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.776-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.776-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.776-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.777-0400 m31102| 2015-07-09T13:55:42.775-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.777-0400 m30999| 2015-07-09T13:55:42.775-0400 I COMMAND [conn1] DROP: db2.coll2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.777-0400 m30999| 2015-07-09T13:55:42.775-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:42.775-0400-559eb59eca4787b9985d1ba5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", 
time: new Date(1436464542775), what: "dropCollection.start", ns: "db2.coll2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.778-0400 m31101| 2015-07-09T13:55:42.777-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.779-0400 m31102| 2015-07-09T13:55:42.778-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.779-0400 m31102| 2015-07-09T13:55:42.779-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection7_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.779-0400 m31101| 2015-07-09T13:55:42.779-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection9_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.780-0400 m31102| 2015-07-09T13:55:42.780-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.782-0400 m31102| 2015-07-09T13:55:42.782-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection1_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.785-0400 m31102| 2015-07-09T13:55:42.784-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection0_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.785-0400 m31102| 2015-07-09T13:55:42.784-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62669 #9 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.786-0400 m31202| 2015-07-09T13:55:42.786-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62670 #9 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.791-0400 m31101| 2015-07-09T13:55:42.791-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection8_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.794-0400 m31101| 2015-07-09T13:55:42.794-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.796-0400 m31101| 2015-07-09T13:55:42.796-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.799-0400 m31101| 2015-07-09T13:55:42.798-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection7_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.801-0400 m31101| 2015-07-09T13:55:42.800-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.804-0400 m31101| 2015-07-09T13:55:42.803-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection1_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.808-0400 m31101| 2015-07-09T13:55:42.807-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection0_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.812-0400 m31102| 2015-07-09T13:55:42.812-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection5_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.832-0400 m30999| 2015-07-09T13:55:42.832-0400 I SHARDING [conn1] distributed lock 'db2.coll2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59eca4787b9985d1ba6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.834-0400 m31100| 2015-07-09T13:55:42.832-0400 I COMMAND [conn15] CMD: drop db2.coll2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.834-0400 m31200| 2015-07-09T13:55:42.834-0400 I COMMAND [conn18] CMD: drop 
db2.coll2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.838-0400 m31202| 2015-07-09T13:55:42.838-0400 I COMMAND [repl writer worker 13] CMD: drop db2.coll2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.838-0400 m31201| 2015-07-09T13:55:42.838-0400 I COMMAND [repl writer worker 12] CMD: drop db2.coll2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.843-0400 m31101| 2015-07-09T13:55:42.843-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection5_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.888-0400 m31100| 2015-07-09T13:55:42.887-0400 I SHARDING [conn15] remotely refreshing metadata for db2.coll2 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb59cca4787b9985d1ba3, current metadata version is 2|3||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.889-0400 m31100| 2015-07-09T13:55:42.889-0400 W SHARDING [conn15] no chunks found when reloading db2.coll2, previous version was 0|0||559eb59cca4787b9985d1ba3, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.890-0400 m31100| 2015-07-09T13:55:42.889-0400 I SHARDING [conn15] dropping metadata for db2.coll2 at shard version 2|3||559eb59cca4787b9985d1ba3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.891-0400 m31200| 2015-07-09T13:55:42.890-0400 I SHARDING [conn18] remotely refreshing metadata for db2.coll2 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb59cca4787b9985d1ba3, current metadata version is 2|5||559eb59cca4787b9985d1ba3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.892-0400 m31200| 2015-07-09T13:55:42.891-0400 W SHARDING [conn18] no chunks found when reloading db2.coll2, previous version was 0|0||559eb59cca4787b9985d1ba3, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.892-0400 m31200| 2015-07-09T13:55:42.891-0400 I SHARDING [conn18] dropping metadata for db2.coll2 at shard version 2|5||559eb59cca4787b9985d1ba3, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.893-0400 m30999| 2015-07-09T13:55:42.891-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:42.891-0400-559eb59eca4787b9985d1ba7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464542891), what: "dropCollection", ns: "db2.coll2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.930-0400 m31102| 2015-07-09T13:55:42.930-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.937-0400 m31102| 2015-07-09T13:55:42.936-0400 I COMMAND [repl writer worker 15] CMD: drop db2.drop_collection9_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.946-0400 m30999| 2015-07-09T13:55:42.945-0400 I SHARDING [conn1] distributed lock 'db2.coll2/bs-osx108-8:30999:1436464534:16807' unlocked. 
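[annotation] The records above show the full drop protocol for the sharded collection db2.coll2: mongos (m30999) logs the dropCollection.start event, acquires the distributed lock 'db2.coll2/...', issues the drop to both shard primaries (m31100 and m31200), each shard refreshes its metadata, sees "no chunks found ... this is a drop", discards its cached shard version, and mongos then logs dropCollection and releases the lock. A minimal shell sketch that exercises this same path follows; the port is taken from the log, but the host and variable names are assumptions for illustration:

    // Drop a sharded collection through mongos. The drop fans out to every
    // shard primary and clears the chunk metadata in the config servers,
    // matching the m30999/m31100/m31200 records above.
    var conn = new Mongo("localhost:30999");  // hypothetical host; port from the log
    var db2 = conn.getDB("db2");
    assert(db2.coll2.drop());                 // returns true if the collection existed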
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.960-0400 m31102| 2015-07-09T13:55:42.960-0400 I COMMAND [repl writer worker 4] CMD: drop db2.drop_collection8_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.961-0400 m31101| 2015-07-09T13:55:42.961-0400 I COMMAND [repl writer worker 14] CMD: drop db2.drop_collection6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.963-0400 m31101| 2015-07-09T13:55:42.963-0400 I COMMAND [repl writer worker 12] CMD: drop db2.drop_collection9_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.964-0400 m31102| 2015-07-09T13:55:42.963-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.965-0400 m31102| 2015-07-09T13:55:42.965-0400 I COMMAND [repl writer worker 7] CMD: drop db2.drop_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.966-0400 m31102| 2015-07-09T13:55:42.966-0400 I COMMAND [repl writer worker 8] CMD: drop db2.drop_collection7_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.968-0400 m31102| 2015-07-09T13:55:42.968-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.971-0400 m31102| 2015-07-09T13:55:42.971-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection1_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.972-0400 m31102| 2015-07-09T13:55:42.972-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection0_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.974-0400 m31101| 2015-07-09T13:55:42.974-0400 I COMMAND [repl writer worker 10] CMD: drop db2.drop_collection8_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.977-0400 m31101| 2015-07-09T13:55:42.977-0400 I COMMAND [repl writer worker 9] CMD: drop db2.drop_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.977-0400 m31102| 2015-07-09T13:55:42.977-0400 I COMMAND [repl writer worker 6] CMD: drop db2.drop_collection5_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.978-0400 m31102| 2015-07-09T13:55:42.977-0400 I COMMAND [repl writer worker 13] CMD: drop db2.coll2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.978-0400 m31101| 2015-07-09T13:55:42.978-0400 I COMMAND [repl writer worker 2] CMD: drop db2.drop_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.979-0400 m31101| 2015-07-09T13:55:42.978-0400 I COMMAND [repl writer worker 0] CMD: drop db2.drop_collection7_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.980-0400 m31101| 2015-07-09T13:55:42.980-0400 I COMMAND [repl writer worker 5] CMD: drop db2.drop_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.982-0400 m31101| 2015-07-09T13:55:42.981-0400 I COMMAND [repl writer worker 3] CMD: drop db2.drop_collection1_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.983-0400 m31101| 2015-07-09T13:55:42.983-0400 I COMMAND [repl writer worker 11] CMD: drop db2.drop_collection0_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.984-0400 m31101| 2015-07-09T13:55:42.984-0400 I COMMAND [repl writer worker 1] CMD: drop db2.drop_collection5_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:42.985-0400 m31101| 2015-07-09T13:55:42.985-0400 I COMMAND [repl writer worker 13] CMD: drop db2.coll2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.001-0400 m30999| 2015-07-09T13:55:43.001-0400 I COMMAND [conn1] DROP DATABASE: db2 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.001-0400 m30999| 2015-07-09T13:55:43.001-0400 I SHARDING [conn1] DBConfig::dropDatabase: db2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.002-0400 m30999| 2015-07-09T13:55:43.001-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.001-0400-559eb59fca4787b9985d1ba8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464543001), what: "dropDatabase.start", ns: "db2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.108-0400 m30999| 2015-07-09T13:55:43.108-0400 I SHARDING [conn1] DBConfig::dropDatabase: db2 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.109-0400 m31100| 2015-07-09T13:55:43.108-0400 I COMMAND [conn28] dropDatabase db2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.109-0400 m31100| 2015-07-09T13:55:43.108-0400 I COMMAND [conn28] dropDatabase db2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.109-0400 m30999| 2015-07-09T13:55:43.109-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.109-0400-559eb59fca4787b9985d1ba9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464543109), what: "dropDatabase", ns: "db2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.110-0400 m31101| 2015-07-09T13:55:43.109-0400 I COMMAND [repl writer worker 15] dropDatabase db2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.110-0400 m31101| 2015-07-09T13:55:43.109-0400 I COMMAND [repl writer worker 15] dropDatabase db2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.110-0400 m31102| 2015-07-09T13:55:43.110-0400 I COMMAND [repl writer worker 9] dropDatabase db2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.110-0400 m31102| 2015-07-09T13:55:43.110-0400 I COMMAND [repl writer worker 9] dropDatabase db2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.195-0400 m31100| 2015-07-09T13:55:43.195-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.199-0400 m31102| 2015-07-09T13:55:43.198-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.199-0400 m31101| 2015-07-09T13:55:43.199-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.233-0400 m31200| 2015-07-09T13:55:43.232-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.235-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.236-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.236-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.236-0400 jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.236-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.236-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.236-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.236-0400 m31202| 2015-07-09T13:55:43.236-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.237-0400 m31201| 2015-07-09T13:55:43.236-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown 
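[annotation] Here mongos handles DROP DATABASE for db2: DBConfig::dropDatabase first drops any still-sharded collections (0 in this run, since db2.coll2 was already dropped above), then sends dropDatabase to the primary shard test-rs0, whose secondaries m31101 and m31102 replay it from the oplog; the harness then drops test.fsm_teardown on both shards before the next workload banner. A sketch of the equivalent shell calls, again assuming a mongos reachable on port 30999:

    // Drop the whole database through mongos. Sharded collections go first,
    // then dropDatabase runs on the primary shard and replicates to the
    // secondaries, as the m31100/m31101/m31102 records above show.
    var conn = new Mongo("localhost:30999");  // hypothetical host
    var res = conn.getDB("db2").dropDatabase();
    assert.eq(1, res.ok);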
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.242-0400 m30999| 2015-07-09T13:55:43.242-0400 I SHARDING [conn1] distributed lock 'db3/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59fca4787b9985d1baa [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.246-0400 m30999| 2015-07-09T13:55:43.245-0400 I SHARDING [conn1] Placing [db3] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.246-0400 m30999| 2015-07-09T13:55:43.245-0400 I SHARDING [conn1] Enabling sharding for database [db3] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.300-0400 m30999| 2015-07-09T13:55:43.300-0400 I SHARDING [conn1] distributed lock 'db3/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.326-0400 m31100| 2015-07-09T13:55:43.325-0400 I INDEX [conn29] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.326-0400 m31100| 2015-07-09T13:55:43.325-0400 I INDEX [conn29] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.332-0400 m31100| 2015-07-09T13:55:43.332-0400 I INDEX [conn29] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.334-0400 m30999| 2015-07-09T13:55:43.333-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db3.coll3", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.337-0400 m30999| 2015-07-09T13:55:43.336-0400 I SHARDING [conn1] distributed lock 'db3.coll3/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb59fca4787b9985d1bab [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.338-0400 m30999| 2015-07-09T13:55:43.337-0400 I SHARDING [conn1] enable sharding on: db3.coll3 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.338-0400 m30999| 2015-07-09T13:55:43.337-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.337-0400-559eb59fca4787b9985d1bac", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464543337), what: "shardCollection.start", ns: "db3.coll3", details: { shardKey: { _id: "hashed" }, collection: "db3.coll3", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.346-0400 m31102| 2015-07-09T13:55:43.345-0400 I INDEX [repl writer worker 15] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.346-0400 m31102| 2015-07-09T13:55:43.345-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.354-0400 m31102| 2015-07-09T13:55:43.354-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.355-0400 m31101| 2015-07-09T13:55:43.354-0400 I INDEX [repl writer worker 12] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.355-0400 m31101| 2015-07-09T13:55:43.355-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.362-0400 m31101| 2015-07-09T13:55:43.362-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.390-0400 m30999| 2015-07-09T13:55:43.390-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db3.coll3 using new epoch 559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.496-0400 m30999| 2015-07-09T13:55:43.496-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 15 version: 1|1||559eb59fca4787b9985d1bad based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.553-0400 m30999| 2015-07-09T13:55:43.553-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 16 version: 1|1||559eb59fca4787b9985d1bad based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.556-0400 m31100| 2015-07-09T13:55:43.555-0400 I SHARDING [conn41] remotely refreshing metadata for db3.coll3 with requested shard version 1|1||559eb59fca4787b9985d1bad, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.557-0400 m31100| 2015-07-09T13:55:43.557-0400 I SHARDING [conn41] collection db3.coll3 was previously unsharded, new metadata loaded with shard version 1|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.557-0400 m31100| 2015-07-09T13:55:43.557-0400 I SHARDING [conn41] collection version was loaded at version 1|1||559eb59fca4787b9985d1bad, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.558-0400 m30999| 2015-07-09T13:55:43.557-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.557-0400-559eb59fca4787b9985d1bae", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464543557), what: "shardCollection", ns: "db3.coll3", details: { version: "1|1||559eb59fca4787b9985d1bad" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.612-0400 m30999| 2015-07-09T13:55:43.611-0400 I SHARDING [conn1] distributed lock 'db3.coll3/bs-osx108-8:30999:1436464534:16807' unlocked. 
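[annotation] This block is the standard per-workload setup: mongos enables sharding on db3, the primary shard m31100 builds the { _id: "hashed" } index (replicated by m31101/m31102), shardcollection creates two initial chunks under epoch 559eb59fca4787b9985d1bad, and the distributed lock is released. The moveChunk and splitChunk records that follow then move the upper chunk to test-rs1 and split each shard's chunk at the hashed-key quarter points. A sketch of shell commands that reproduce the same sequence, run in a shell connected to one of the mongos processes; the bounds and split points are copied from the log, everything else is illustrative:

    // Shard db3.coll3 on a hashed _id, then rebalance and split it the way
    // the m30999/m31100/m31200 records below do.
    sh.enableSharding("db3");
    sh.shardCollection("db3.coll3", { _id: "hashed" });  // creates 2 initial chunks
    db.adminCommand({                                    // bounds copied from the log
        moveChunk: "db3.coll3",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
        to: "test-rs1"
    });
    sh.splitAt("db3.coll3", { _id: NumberLong("-4611686018427387902") });
    sh.splitAt("db3.coll3", { _id: NumberLong("4611686018427387902") });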
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.613-0400 m30999| 2015-07-09T13:55:43.612-0400 I SHARDING [conn1] moving chunk ns: db3.coll3 moving ( ns: db3.coll3, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.613-0400 m31100| 2015-07-09T13:55:43.613-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.615-0400 m31100| 2015-07-09T13:55:43.614-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb59fca4787b9985d1bad') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.618-0400 m31100| 2015-07-09T13:55:43.617-0400 I SHARDING [conn15] distributed lock 'db3.coll3/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb59f792e00bb672748e7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.618-0400 m31100| 2015-07-09T13:55:43.618-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.618-0400-559eb59f792e00bb672748e8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464543618), what: "moveChunk.start", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.671-0400 m31100| 2015-07-09T13:55:43.671-0400 I SHARDING [conn15] remotely refreshing metadata for db3.coll3 based on current shard version 1|1||559eb59fca4787b9985d1bad, current metadata version is 1|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.672-0400 m31100| 2015-07-09T13:55:43.672-0400 I SHARDING [conn15] metadata of collection db3.coll3 already up to date (shard version : 1|1||559eb59fca4787b9985d1bad, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.673-0400 m31100| 2015-07-09T13:55:43.672-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.673-0400 m31100| 2015-07-09T13:55:43.673-0400 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.673-0400 m31200| 2015-07-09T13:55:43.673-0400 I SHARDING [conn16] remotely refreshing metadata for db3.coll3, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.675-0400 m31200| 2015-07-09T13:55:43.675-0400 I SHARDING [conn16] collection db3.coll3 was previously unsharded, new metadata loaded with shard version 0|0||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.675-0400 m31200| 2015-07-09T13:55:43.675-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb59fca4787b9985d1bad, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.675-0400 m31200| 2015-07-09T13:55:43.675-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db3.coll3 from 
test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.678-0400 m31100| 2015-07-09T13:55:43.677-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.681-0400 m31100| 2015-07-09T13:55:43.680-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.687-0400 m31100| 2015-07-09T13:55:43.686-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.689-0400 m31200| 2015-07-09T13:55:43.689-0400 I INDEX [migrateThread] build index on: db3.coll3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.690-0400 m31200| 2015-07-09T13:55:43.689-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.697-0400 m31100| 2015-07-09T13:55:43.696-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.697-0400 m31200| 2015-07-09T13:55:43.697-0400 I INDEX [migrateThread] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.698-0400 m31200| 2015-07-09T13:55:43.697-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.710-0400 m31200| 2015-07-09T13:55:43.709-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.710-0400 m31200| 2015-07-09T13:55:43.710-0400 I SHARDING [migrateThread] Deleter starting delete for: db3.coll3 from { _id: 0 } -> { _id: MaxKey }, with opId: 573 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.712-0400 m31200| 2015-07-09T13:55:43.712-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db3.coll3 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.714-0400 m31100| 2015-07-09T13:55:43.713-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.719-0400 m31202| 2015-07-09T13:55:43.718-0400 I INDEX [repl writer worker 4] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.719-0400 m31202| 2015-07-09T13:55:43.718-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.721-0400 m31201| 2015-07-09T13:55:43.721-0400 I INDEX [repl writer worker 0] build index on: db3.coll3 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.721-0400 m31201| 2015-07-09T13:55:43.721-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.725-0400 m31202| 2015-07-09T13:55:43.724-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.728-0400 m31200| 2015-07-09T13:55:43.727-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.728-0400 m31200| 2015-07-09T13:55:43.728-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db3.coll3' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.729-0400 m31201| 2015-07-09T13:55:43.729-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.748-0400 m31100| 2015-07-09T13:55:43.747-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.748-0400 m31100| 2015-07-09T13:55:43.748-0400 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.749-0400 m31100| 2015-07-09T13:55:43.748-0400 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.749-0400 m31100| 2015-07-09T13:55:43.748-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.752-0400 m31200| 2015-07-09T13:55:43.752-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db3.coll3' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.753-0400 m31200| 2015-07-09T13:55:43.752-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.752-0400-559eb59fd5a107a5b9c0da89", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464543752), what: "moveChunk.to", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 34, step 2 of 5: 15, step 3 of 5: 1, step 4 of 5: 0, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.806-0400 m31100| 2015-07-09T13:55:43.806-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.807-0400 m31100| 2015-07-09T13:55:43.806-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb59fca4787b9985d1bad through { _id: MinKey } -> { _id: 0 } for collection 'db3.coll3' [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.808-0400 m31100| 2015-07-09T13:55:43.807-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.807-0400-559eb59f792e00bb672748e9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464543807), what: "moveChunk.commit", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.861-0400 m31100| 2015-07-09T13:55:43.861-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.861-0400 m31100| 2015-07-09T13:55:43.861-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.862-0400 m31100| 2015-07-09T13:55:43.861-0400 I SHARDING [conn15] Deleter starting delete for: db3.coll3 from { _id: 0 } -> { _id: MaxKey }, with opId: 2208 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.862-0400 m31100| 
2015-07-09T13:55:43.861-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db3.coll3 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.862-0400 m31100| 2015-07-09T13:55:43.861-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.863-0400 m31100| 2015-07-09T13:55:43.862-0400 I SHARDING [conn15] distributed lock 'db3.coll3/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.863-0400 m31100| 2015-07-09T13:55:43.862-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.862-0400-559eb59f792e00bb672748ea", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464543862), what: "moveChunk.from", ns: "db3.coll3", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 72, step 5 of 6: 113, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.917-0400 m31100| 2015-07-09T13:55:43.916-0400 I COMMAND [conn15] command db3.coll3 command: moveChunk { moveChunk: "db3.coll3", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb59fca4787b9985d1bad') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.918-0400 m30999| 2015-07-09T13:55:43.918-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 17 version: 2|1||559eb59fca4787b9985d1bad based on: 1|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.919-0400 m31100| 2015-07-09T13:55:43.919-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db3.coll3", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59fca4787b9985d1bad') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.923-0400 m31100| 2015-07-09T13:55:43.922-0400 I SHARDING [conn15] distributed lock 'db3.coll3/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb59f792e00bb672748eb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.923-0400 m31100| 2015-07-09T13:55:43.922-0400 I SHARDING [conn15] remotely refreshing metadata for db3.coll3 based on current shard version 2|0||559eb59fca4787b9985d1bad, current metadata version is 2|0||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.924-0400 m31100| 2015-07-09T13:55:43.924-0400 I SHARDING [conn15] updating metadata for db3.coll3 from shard version 2|0||559eb59fca4787b9985d1bad to shard version 2|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.924-0400 m31100| 2015-07-09T13:55:43.924-0400 I SHARDING [conn15] collection version was loaded at version 
2|1||559eb59fca4787b9985d1bad, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.924-0400 m31100| 2015-07-09T13:55:43.924-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.926-0400 m31100| 2015-07-09T13:55:43.925-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.925-0400-559eb59f792e00bb672748ec", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464543925), what: "split", ns: "db3.coll3", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb59fca4787b9985d1bad') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb59fca4787b9985d1bad') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.980-0400 m31100| 2015-07-09T13:55:43.979-0400 I SHARDING [conn15] distributed lock 'db3.coll3/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.981-0400 m30999| 2015-07-09T13:55:43.981-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 18 version: 2|3||559eb59fca4787b9985d1bad based on: 2|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.982-0400 m31200| 2015-07-09T13:55:43.981-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db3.coll3", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb59fca4787b9985d1bad') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.986-0400 m31200| 2015-07-09T13:55:43.985-0400 I SHARDING [conn18] distributed lock 'db3.coll3/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb59fd5a107a5b9c0da8a [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.986-0400 m31200| 2015-07-09T13:55:43.985-0400 I SHARDING [conn18] remotely refreshing metadata for db3.coll3 based on current shard version 0|0||559eb59fca4787b9985d1bad, current metadata version is 1|1||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.987-0400 m31200| 2015-07-09T13:55:43.987-0400 I SHARDING [conn18] updating metadata for db3.coll3 from shard version 0|0||559eb59fca4787b9985d1bad to shard version 2|0||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.987-0400 m31200| 2015-07-09T13:55:43.987-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb59fca4787b9985d1bad, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.988-0400 m31200| 2015-07-09T13:55:43.987-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:43.989-0400 m31200| 2015-07-09T13:55:43.988-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:43.988-0400-559eb59fd5a107a5b9c0da8b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464543988), what: "split", ns: "db3.coll3", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb59fca4787b9985d1bad') }, right: { min: { _id: 
4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb59fca4787b9985d1bad') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.042-0400 m31200| 2015-07-09T13:55:44.042-0400 I SHARDING [conn18] distributed lock 'db3.coll3/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.045-0400 m30999| 2015-07-09T13:55:44.044-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 19 version: 2|5||559eb59fca4787b9985d1bad based on: 2|3||559eb59fca4787b9985d1bad [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.047-0400 m30999| 2015-07-09T13:55:44.046-0400 I SHARDING [conn1] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.047-0400 m30999| 2015-07-09T13:55:44.047-0400 I SHARDING [conn1] retrying command: { listIndexes: "coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.047-0400 m31100| 2015-07-09T13:55:44.047-0400 I NETWORK [conn41] end connection 127.0.0.1:62648 (45 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.060-0400 m31100| 2015-07-09T13:55:44.059-0400 I INDEX [conn47] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.060-0400 m31100| 2015-07-09T13:55:44.059-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.060-0400 m31200| 2015-07-09T13:55:44.060-0400 I INDEX [conn19] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.061-0400 m31200| 2015-07-09T13:55:44.060-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.067-0400 m31100| 2015-07-09T13:55:44.066-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.069-0400 m31200| 2015-07-09T13:55:44.069-0400 I INDEX [conn19] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.079-0400 m31201| 2015-07-09T13:55:44.078-0400 I INDEX [repl writer worker 5] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.079-0400 m31102| 2015-07-09T13:55:44.078-0400 I INDEX [repl writer worker 14] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.079-0400 m31201| 2015-07-09T13:55:44.078-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.080-0400 m31102| 2015-07-09T13:55:44.079-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.082-0400 m31202| 2015-07-09T13:55:44.082-0400 I INDEX [repl writer worker 2] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.083-0400 m31202| 2015-07-09T13:55:44.082-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.091-0400 m31200| 2015-07-09T13:55:44.090-0400 I INDEX [conn19] build index on: db3.coll3 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.091-0400 m31200| 2015-07-09T13:55:44.090-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.091-0400 m31101| 2015-07-09T13:55:44.090-0400 I INDEX [repl writer worker 6] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.092-0400 m31101| 2015-07-09T13:55:44.090-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.095-0400 m31100| 2015-07-09T13:55:44.094-0400 I INDEX [conn47] build index on: db3.coll3 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.095-0400 m31100| 2015-07-09T13:55:44.094-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.096-0400 m31201| 2015-07-09T13:55:44.096-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.098-0400 m31202| 2015-07-09T13:55:44.098-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.102-0400 m31200| 2015-07-09T13:55:44.102-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.109-0400 m31101| 2015-07-09T13:55:44.109-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.109-0400 m31102| 2015-07-09T13:55:44.109-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.112-0400 m31202| 2015-07-09T13:55:44.111-0400 I INDEX [repl writer worker 6] build index on: db3.coll3 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.112-0400 m31202| 2015-07-09T13:55:44.111-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.113-0400 m31100| 2015-07-09T13:55:44.112-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.121-0400 m31201| 2015-07-09T13:55:44.120-0400 I INDEX [repl writer worker 8] build index on: db3.coll3 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.122-0400 m31201| 2015-07-09T13:55:44.121-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.124-0400 m31202| 2015-07-09T13:55:44.124-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.131-0400 m31102| 2015-07-09T13:55:44.130-0400 I INDEX [repl writer worker 4] build index on: db3.coll3 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.131-0400 m31102| 2015-07-09T13:55:44.130-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.131-0400 m31101| 2015-07-09T13:55:44.130-0400 I INDEX [repl writer worker 10] build index on: db3.coll3 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.131-0400 m31101| 2015-07-09T13:55:44.130-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.132-0400 m31200| 2015-07-09T13:55:44.130-0400 I INDEX [conn19] build index on: db3.coll3 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.132-0400 m31200| 2015-07-09T13:55:44.130-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.135-0400 m31201| 2015-07-09T13:55:44.134-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.135-0400 m31100| 2015-07-09T13:55:44.135-0400 I INDEX [conn47] build index on: db3.coll3 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db3.coll3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.136-0400 m31100| 2015-07-09T13:55:44.135-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.142-0400 m31101| 2015-07-09T13:55:44.142-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.144-0400 m31200| 2015-07-09T13:55:44.144-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.145-0400 m31102| 2015-07-09T13:55:44.145-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.149-0400 m31100| 2015-07-09T13:55:44.148-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.151-0400 m31201| 2015-07-09T13:55:44.151-0400 I INDEX [repl writer worker 4] build index on: db3.coll3 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.152-0400 m31201| 2015-07-09T13:55:44.151-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.152-0400 m31202| 2015-07-09T13:55:44.151-0400 I INDEX [repl writer worker 7] build index on: db3.coll3 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.152-0400 m31202| 2015-07-09T13:55:44.151-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.163-0400 m31101| 2015-07-09T13:55:44.163-0400 I INDEX [repl writer worker 9] build index on: db3.coll3 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.164-0400 m31100| 2015-07-09T13:55:44.163-0400 I INDEX [conn47] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.164-0400 m31101| 2015-07-09T13:55:44.163-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.164-0400 m31100| 2015-07-09T13:55:44.163-0400 I INDEX [conn47] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.170-0400 m31102| 2015-07-09T13:55:44.170-0400 I INDEX [repl writer worker 10] build index on: db3.coll3 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.171-0400 m31102| 2015-07-09T13:55:44.170-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.173-0400 m31202| 2015-07-09T13:55:44.172-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.173-0400 m31200| 2015-07-09T13:55:44.172-0400 I INDEX [conn19] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.173-0400 m31200| 2015-07-09T13:55:44.173-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.173-0400 m31201| 2015-07-09T13:55:44.173-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.174-0400 m31100| 2015-07-09T13:55:44.173-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.178-0400 m31101| 2015-07-09T13:55:44.178-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.185-0400 m31102| 2015-07-09T13:55:44.185-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.187-0400 m31200| 2015-07-09T13:55:44.186-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.188-0400 m31101| 2015-07-09T13:55:44.188-0400 I INDEX [repl writer worker 2] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.188-0400 m31101| 2015-07-09T13:55:44.188-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.193-0400 m31102| 2015-07-09T13:55:44.193-0400 I INDEX [repl writer worker 7] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.193-0400 m31102| 2015-07-09T13:55:44.193-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.199-0400 m31201| 2015-07-09T13:55:44.198-0400 I INDEX [repl writer worker 9] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.199-0400 m31201| 2015-07-09T13:55:44.198-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.201-0400 m31202| 2015-07-09T13:55:44.200-0400 I INDEX [repl writer worker 9] build index on: db3.coll3 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db3.coll3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.201-0400 m31202| 2015-07-09T13:55:44.200-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.207-0400 m31101| 2015-07-09T13:55:44.207-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.209-0400 m31201| 2015-07-09T13:55:44.209-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.213-0400 m31102| 2015-07-09T13:55:44.212-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.216-0400 m31202| 2015-07-09T13:55:44.216-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.220-0400 m31100| 2015-07-09T13:55:44.219-0400 I COMMAND [conn15] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.220-0400 m31200| 2015-07-09T13:55:44.219-0400 I COMMAND [conn18] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.221-0400 m31100| 2015-07-09T13:55:44.221-0400 I COMMAND [conn15] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.222-0400 m31201| 2015-07-09T13:55:44.221-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.222-0400 m31200| 2015-07-09T13:55:44.221-0400 I COMMAND [conn18] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.222-0400 m31202| 2015-07-09T13:55:44.221-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.222-0400 m31100| 2015-07-09T13:55:44.222-0400 I COMMAND [conn15] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.222-0400 m31200| 2015-07-09T13:55:44.222-0400 I COMMAND [conn18] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.223-0400 m31102| 2015-07-09T13:55:44.223-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.223-0400 m31101| 2015-07-09T13:55:44.223-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.224-0400 m31100| 2015-07-09T13:55:44.223-0400 I COMMAND [conn15] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.224-0400 m31200| 2015-07-09T13:55:44.223-0400 I COMMAND [conn18] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.224-0400 m31202| 2015-07-09T13:55:44.224-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.224-0400 m31102| 2015-07-09T13:55:44.224-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.224-0400 m31201| 2015-07-09T13:55:44.224-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.225-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.249-0400 m31202| 2015-07-09T13:55:44.225-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.267-0400 m31201| 2015-07-09T13:55:44.236-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.267-0400 m31201| 2015-07-09T13:55:44.246-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.278-0400 m31101| 2015-07-09T13:55:44.247-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.279-0400 m31202| 2015-07-09T13:55:44.247-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.279-0400 m31101| 2015-07-09T13:55:44.269-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.279-0400 m31101| 2015-07-09T13:55:44.269-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.282-0400 m31102| 2015-07-09T13:55:44.280-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.287-0400 m30999| 2015-07-09T13:55:44.287-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62671 #17 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.314-0400 m30998| 2015-07-09T13:55:44.314-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62672 #17 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.322-0400 m30998| 2015-07-09T13:55:44.321-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62673 #18 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.332-0400 m31102| 2015-07-09T13:55:44.325-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.343-0400 m30999| 2015-07-09T13:55:44.342-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62674 #18 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.344-0400 m30999| 2015-07-09T13:55:44.343-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62675 #19 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.344-0400 m30999| 2015-07-09T13:55:44.343-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62676 #20 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.344-0400 m30999| 2015-07-09T13:55:44.344-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62677 #21 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.354-0400 m30998| 2015-07-09T13:55:44.354-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62678 #19 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.360-0400 m30998| 2015-07-09T13:55:44.359-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62679 #20 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.365-0400 m30998| 2015-07-09T13:55:44.365-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62680 #21 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.374-0400 setting random seed: 1361816232092
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.375-0400 setting random seed: 9446765375323
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.375-0400 setting random seed: 1785736996680
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.375-0400 setting random seed: 8045423435978
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.376-0400 setting random seed: 3020727774128
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.376-0400 setting random seed: 555452187545
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.377-0400 setting random seed: 9431751454249
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.378-0400 m31101| 2015-07-09T13:55:44.378-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62681 #8 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.379-0400 setting random seed: 2881717183627
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.379-0400 setting random seed: 5484920423477
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.381-0400 setting random seed: 9371723411604
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.384-0400 m30998| 2015-07-09T13:55:44.383-0400 I SHARDING [conn18] ChunkManager: time to load chunks for db3.coll3: 0ms sequenceNumber: 5 version: 2|5||559eb59fca4787b9985d1bad based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.389-0400 m31201| 2015-07-09T13:55:44.389-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62682 #8 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.393-0400 m31200| 2015-07-09T13:55:44.393-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62683 #28 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.413-0400 m31200| 2015-07-09T13:55:44.412-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62684 #29 (25 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.426-0400 m31200| 2015-07-09T13:55:44.425-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62685 #30 (26 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.432-0400 m31100| 2015-07-09T13:55:44.432-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62686 #52 (46 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.433-0400 m31200| 2015-07-09T13:55:44.432-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62687 #31 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.443-0400 m31200| 2015-07-09T13:55:44.442-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62688 #32 (28 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.459-0400 m31200| 2015-07-09T13:55:44.458-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62689 #33 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.475-0400 m30998| 2015-07-09T13:55:44.475-0400 I NETWORK [conn18] end connection 127.0.0.1:62673 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.476-0400 m30999| 2015-07-09T13:55:44.476-0400 I NETWORK [conn20] end connection 127.0.0.1:62676 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.483-0400 m30998| 2015-07-09T13:55:44.482-0400 I NETWORK [conn21] end connection 127.0.0.1:62680 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.493-0400 m30998| 2015-07-09T13:55:44.491-0400 I NETWORK [conn20] end connection 127.0.0.1:62679 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.499-0400 m30998| 2015-07-09T13:55:44.499-0400 I NETWORK [conn17] end connection 127.0.0.1:62672 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.500-0400 m30999| 2015-07-09T13:55:44.499-0400 I NETWORK [conn17] end connection 127.0.0.1:62671 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.513-0400 m30999| 2015-07-09T13:55:44.511-0400 I NETWORK [conn19] end connection 127.0.0.1:62675 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.530-0400 m29000| 2015-07-09T13:55:44.530-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62690 #35 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.533-0400 m30999| 2015-07-09T13:55:44.532-0400 I NETWORK [conn21] end connection 127.0.0.1:62677 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.539-0400 m30999| 2015-07-09T13:55:44.539-0400 I NETWORK [conn18] end connection 127.0.0.1:62674 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.552-0400 m30998| 2015-07-09T13:55:44.551-0400 I NETWORK [conn19] end connection 127.0.0.1:62678 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.572-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.572-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.572-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.572-0400 jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate_noindex.js: Workload completed in 348 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.572-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.573-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.573-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.573-0400 m30999| 2015-07-09T13:55:44.572-0400 I COMMAND [conn1] DROP: db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.573-0400 m30999| 2015-07-09T13:55:44.572-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:44.572-0400-559eb5a0ca4787b9985d1baf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464544572), what: "dropCollection.start", ns: "db3.coll3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.630-0400 m30999| 2015-07-09T13:55:44.630-0400 I SHARDING [conn1] distributed lock 'db3.coll3/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5a0ca4787b9985d1bb0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.631-0400 m31100| 2015-07-09T13:55:44.631-0400 I COMMAND [conn15] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.634-0400 m31200| 2015-07-09T13:55:44.633-0400 I COMMAND [conn18] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.635-0400 m31101| 2015-07-09T13:55:44.635-0400 I COMMAND [repl writer worker 1] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.635-0400 m31102| 2015-07-09T13:55:44.635-0400 I COMMAND [repl writer worker 6] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.637-0400 m31202| 2015-07-09T13:55:44.637-0400 I COMMAND [repl writer worker 14] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.637-0400 m31201| 2015-07-09T13:55:44.637-0400 I COMMAND [repl writer worker 5] CMD: drop db3.coll3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.689-0400 m31100| 2015-07-09T13:55:44.688-0400 I SHARDING [conn15] remotely refreshing metadata for db3.coll3 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb59fca4787b9985d1bad, current metadata version is 2|3||559eb59fca4787b9985d1bad
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.691-0400 m31100| 2015-07-09T13:55:44.690-0400 W SHARDING [conn15] no chunks found when reloading db3.coll3, previous version was 0|0||559eb59fca4787b9985d1bad, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.691-0400 m31100| 2015-07-09T13:55:44.690-0400 I SHARDING [conn15] dropping metadata for db3.coll3 at shard version 2|3||559eb59fca4787b9985d1bad, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.692-0400 m31200| 2015-07-09T13:55:44.692-0400 I SHARDING [conn18] remotely refreshing metadata for db3.coll3 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb59fca4787b9985d1bad, current metadata version is 2|5||559eb59fca4787b9985d1bad
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.694-0400 m31200| 2015-07-09T13:55:44.693-0400 W SHARDING [conn18] no chunks found when reloading db3.coll3, previous version was 0|0||559eb59fca4787b9985d1bad, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.694-0400 m31200| 2015-07-09T13:55:44.693-0400 I SHARDING [conn18] dropping metadata for db3.coll3 at shard version 2|5||559eb59fca4787b9985d1bad, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.695-0400 m30999| 2015-07-09T13:55:44.694-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:44.694-0400-559eb5a0ca4787b9985d1bb1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464544694), what: "dropCollection", ns: "db3.coll3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.749-0400 m30999| 2015-07-09T13:55:44.748-0400 I SHARDING [conn1] distributed lock 'db3.coll3/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.804-0400 m30999| 2015-07-09T13:55:44.804-0400 I COMMAND [conn1] DROP DATABASE: db3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.804-0400 m30999| 2015-07-09T13:55:44.804-0400 I SHARDING [conn1] DBConfig::dropDatabase: db3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.805-0400 m30999| 2015-07-09T13:55:44.804-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:44.804-0400-559eb5a0ca4787b9985d1bb2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464544804), what: "dropDatabase.start", ns: "db3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.910-0400 m30999| 2015-07-09T13:55:44.909-0400 I SHARDING [conn1] DBConfig::dropDatabase: db3 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.910-0400 m31100| 2015-07-09T13:55:44.910-0400 I COMMAND [conn28] dropDatabase db3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.910-0400 m31100| 2015-07-09T13:55:44.910-0400 I COMMAND [conn28] dropDatabase db3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.911-0400 m30999| 2015-07-09T13:55:44.910-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:44.910-0400-559eb5a0ca4787b9985d1bb3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464544910), what: "dropDatabase", ns: "db3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.911-0400 m31102| 2015-07-09T13:55:44.911-0400 I COMMAND [repl writer worker 13] dropDatabase db3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.911-0400 m31102| 2015-07-09T13:55:44.911-0400 I COMMAND [repl writer worker 13] dropDatabase db3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.912-0400 m31101| 2015-07-09T13:55:44.912-0400 I COMMAND [repl writer worker 13] dropDatabase db3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.912-0400 m31101| 2015-07-09T13:55:44.912-0400 I COMMAND [repl writer worker 13] dropDatabase db3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.995-0400 m31100| 2015-07-09T13:55:44.995-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.998-0400 m31101| 2015-07-09T13:55:44.998-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:44.999-0400 m31102| 2015-07-09T13:55:44.998-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.033-0400 m31200| 2015-07-09T13:55:45.033-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.035-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.036-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.036-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.036-0400 jstests/concurrency/fsm_workloads/update_rename.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.036-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.036-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.036-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.037-0400 m31202| 2015-07-09T13:55:45.037-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.037-0400 m31201| 2015-07-09T13:55:45.037-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.043-0400 m30999| 2015-07-09T13:55:45.043-0400 I SHARDING [conn1] distributed lock 'db4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5a1ca4787b9985d1bb4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.047-0400 m30999| 2015-07-09T13:55:45.047-0400 I SHARDING [conn1] Placing [db4] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.047-0400 m30999| 2015-07-09T13:55:45.047-0400 I SHARDING [conn1] Enabling sharding for database [db4] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.101-0400 m30999| 2015-07-09T13:55:45.100-0400 I SHARDING [conn1] distributed lock 'db4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.124-0400 m31100| 2015-07-09T13:55:45.124-0400 I INDEX [conn30] build index on: db4.coll4 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.125-0400 m31100| 2015-07-09T13:55:45.124-0400 I INDEX [conn30] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.137-0400 m31100| 2015-07-09T13:55:45.137-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.139-0400 m30999| 2015-07-09T13:55:45.138-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db4.coll4", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.142-0400 m30999| 2015-07-09T13:55:45.141-0400 I SHARDING [conn1] distributed lock 'db4.coll4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5a1ca4787b9985d1bb5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.143-0400 m30999| 2015-07-09T13:55:45.142-0400 I SHARDING [conn1] enable sharding on: db4.coll4 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.143-0400 m30999| 2015-07-09T13:55:45.142-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.142-0400-559eb5a1ca4787b9985d1bb6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464545142), what: "shardCollection.start", ns: "db4.coll4", details: { shardKey: { _id: "hashed" }, collection: "db4.coll4", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.150-0400 m31102| 2015-07-09T13:55:45.149-0400 I INDEX [repl writer worker 1] build index on: db4.coll4 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.150-0400 m31102| 2015-07-09T13:55:45.149-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.151-0400 m31101| 2015-07-09T13:55:45.149-0400 I INDEX [repl writer worker 14] build index on: db4.coll4 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.151-0400 m31101| 2015-07-09T13:55:45.150-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.157-0400 m31101| 2015-07-09T13:55:45.157-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.159-0400 m31102| 2015-07-09T13:55:45.158-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.196-0400 m30999| 2015-07-09T13:55:45.195-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db4.coll4 using new epoch 559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.303-0400 m30999| 2015-07-09T13:55:45.302-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 20 version: 1|1||559eb5a1ca4787b9985d1bb7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.358-0400 m30999| 2015-07-09T13:55:45.358-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 21 version: 1|1||559eb5a1ca4787b9985d1bb7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.361-0400 m31100| 2015-07-09T13:55:45.360-0400 I SHARDING [conn52] remotely refreshing metadata for db4.coll4 with requested shard version 1|1||559eb5a1ca4787b9985d1bb7, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.362-0400 m31100| 2015-07-09T13:55:45.361-0400 I SHARDING [conn52] collection db4.coll4 was previously unsharded, new metadata loaded with shard version 1|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.362-0400 m31100| 2015-07-09T13:55:45.362-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb5a1ca4787b9985d1bb7, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.363-0400 m30999| 2015-07-09T13:55:45.362-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.362-0400-559eb5a1ca4787b9985d1bb8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464545362), what: "shardCollection", ns: "db4.coll4", details: { version: "1|1||559eb5a1ca4787b9985d1bb7" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.417-0400 m30999| 2015-07-09T13:55:45.416-0400 I SHARDING [conn1] distributed lock 'db4.coll4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.418-0400 m30999| 2015-07-09T13:55:45.417-0400 I SHARDING [conn1] moving chunk ns: db4.coll4 moving ( ns: db4.coll4, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.418-0400 m31100| 2015-07-09T13:55:45.418-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.419-0400 m31100| 2015-07-09T13:55:45.419-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5a1ca4787b9985d1bb7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.423-0400 m31100| 2015-07-09T13:55:45.423-0400 I SHARDING [conn15] distributed lock 'db4.coll4/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5a1792e00bb672748ee
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.423-0400 m31100| 2015-07-09T13:55:45.423-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.423-0400-559eb5a1792e00bb672748ef", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464545423), what: "moveChunk.start", ns: "db4.coll4", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.481-0400 m31100| 2015-07-09T13:55:45.481-0400 I SHARDING [conn15] remotely refreshing metadata for db4.coll4 based on current shard version 1|1||559eb5a1ca4787b9985d1bb7, current metadata version is 1|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.483-0400 m31100| 2015-07-09T13:55:45.482-0400 I SHARDING [conn15] metadata of collection db4.coll4 already up to date (shard version : 1|1||559eb5a1ca4787b9985d1bb7, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.483-0400 m31100| 2015-07-09T13:55:45.482-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.485-0400 m31100| 2015-07-09T13:55:45.485-0400 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.486-0400 m31200| 2015-07-09T13:55:45.485-0400 I SHARDING [conn16] remotely refreshing metadata for db4.coll4, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.487-0400 m31200| 2015-07-09T13:55:45.487-0400 I SHARDING [conn16] collection db4.coll4 was previously unsharded, new metadata loaded with shard version 0|0||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.487-0400 m31200| 2015-07-09T13:55:45.487-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb5a1ca4787b9985d1bb7, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.488-0400 m31200| 2015-07-09T13:55:45.487-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db4.coll4 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.490-0400 m31100| 2015-07-09T13:55:45.489-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.494-0400 m31100| 2015-07-09T13:55:45.493-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.499-0400 m31100| 2015-07-09T13:55:45.498-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.506-0400 m31200| 2015-07-09T13:55:45.506-0400 I INDEX [migrateThread] build index on: db4.coll4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.507-0400 m31200| 2015-07-09T13:55:45.506-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.509-0400 m31100| 2015-07-09T13:55:45.508-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.512-0400 m31200| 2015-07-09T13:55:45.512-0400 I INDEX [migrateThread] build index on: db4.coll4 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.513-0400 m31200| 2015-07-09T13:55:45.512-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.521-0400 m31200| 2015-07-09T13:55:45.521-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.521-0400 m31200| 2015-07-09T13:55:45.521-0400 I SHARDING [migrateThread] Deleter starting delete for: db4.coll4 from { _id: 0 } -> { _id: MaxKey }, with opId: 1009
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.522-0400 m31200| 2015-07-09T13:55:45.521-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db4.coll4 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.527-0400 m31100| 2015-07-09T13:55:45.525-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.527-0400 m31202| 2015-07-09T13:55:45.526-0400 I INDEX [repl writer worker 15] build index on: db4.coll4 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.527-0400 m31202| 2015-07-09T13:55:45.526-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.531-0400 m31202| 2015-07-09T13:55:45.531-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.531-0400 m31201| 2015-07-09T13:55:45.531-0400 I INDEX [repl writer worker 11] build index on: db4.coll4 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.532-0400 m31201| 2015-07-09T13:55:45.531-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.533-0400 m31200| 2015-07-09T13:55:45.532-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.533-0400 m31200| 2015-07-09T13:55:45.533-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db4.coll4' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.539-0400 m31201| 2015-07-09T13:55:45.538-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.559-0400 m31100| 2015-07-09T13:55:45.558-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.559-0400 m31100| 2015-07-09T13:55:45.559-0400 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.560-0400 m31100| 2015-07-09T13:55:45.560-0400 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.560-0400 m31100| 2015-07-09T13:55:45.560-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.567-0400 m31200| 2015-07-09T13:55:45.567-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db4.coll4' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.568-0400 m31200| 2015-07-09T13:55:45.567-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.567-0400-559eb5a1d5a107a5b9c0da8c", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464545567), what: "moveChunk.to", ns: "db4.coll4", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 33, step 2 of 5: 10, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 34, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.622-0400 m31100| 2015-07-09T13:55:45.621-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.622-0400 m31100| 2015-07-09T13:55:45.621-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb5a1ca4787b9985d1bb7 through { _id: MinKey } -> { _id: 0 } for collection 'db4.coll4'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.623-0400 m31100| 2015-07-09T13:55:45.622-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.622-0400-559eb5a1792e00bb672748f0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464545622), what: "moveChunk.commit", ns: "db4.coll4", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.676-0400 m31100| 2015-07-09T13:55:45.676-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.677-0400 m31100| 2015-07-09T13:55:45.676-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.677-0400 m31100| 2015-07-09T13:55:45.676-0400 I SHARDING [conn15] Deleter starting delete for: db4.coll4 from { _id: 0 } -> { _id: MaxKey }, with opId: 2521
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.677-0400 m31100| 2015-07-09T13:55:45.676-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db4.coll4 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.677-0400 m31100| 2015-07-09T13:55:45.676-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.678-0400 m31100| 2015-07-09T13:55:45.677-0400 I SHARDING [conn15] distributed lock 'db4.coll4/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.678-0400 m31100| 2015-07-09T13:55:45.678-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.678-0400-559eb5a1792e00bb672748f1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464545678), what: "moveChunk.from", ns: "db4.coll4", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 63, step 3 of 6: 5, step 4 of 6: 71, step 5 of 6: 117, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.732-0400 m31100| 2015-07-09T13:55:45.731-0400 I COMMAND [conn15] command db4.coll4 command: moveChunk { moveChunk: "db4.coll4", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5a1ca4787b9985d1bb7') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 313ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.734-0400 m30999| 2015-07-09T13:55:45.734-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db4.coll4: 1ms sequenceNumber: 22 version: 2|1||559eb5a1ca4787b9985d1bb7 based on: 1|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.735-0400 m31100| 2015-07-09T13:55:45.735-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db4.coll4", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5a1ca4787b9985d1bb7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.739-0400 m31100| 2015-07-09T13:55:45.739-0400 I SHARDING [conn15] distributed lock 'db4.coll4/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5a1792e00bb672748f2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.739-0400 m31100| 2015-07-09T13:55:45.739-0400 I SHARDING [conn15] remotely refreshing metadata for db4.coll4 based on current shard version 2|0||559eb5a1ca4787b9985d1bb7, current metadata version is 2|0||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.740-0400 m31100| 2015-07-09T13:55:45.740-0400 I SHARDING [conn15] updating metadata for db4.coll4 from shard version 2|0||559eb5a1ca4787b9985d1bb7 to shard version 2|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.740-0400 m31100| 2015-07-09T13:55:45.740-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb5a1ca4787b9985d1bb7, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.741-0400 m31100| 2015-07-09T13:55:45.740-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.742-0400 m31100| 2015-07-09T13:55:45.741-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.741-0400-559eb5a1792e00bb672748f3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464545741), what: "split", ns: "db4.coll4", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5a1ca4787b9985d1bb7') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5a1ca4787b9985d1bb7') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.796-0400 m31100| 2015-07-09T13:55:45.796-0400 I SHARDING [conn15] distributed lock 'db4.coll4/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.798-0400 m30999| 2015-07-09T13:55:45.798-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 23 version: 2|3||559eb5a1ca4787b9985d1bb7 based on: 2|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.799-0400 m31200| 2015-07-09T13:55:45.798-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db4.coll4", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5a1ca4787b9985d1bb7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.803-0400 m31200| 2015-07-09T13:55:45.802-0400 I SHARDING [conn18] distributed lock 'db4.coll4/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5a1d5a107a5b9c0da8d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.803-0400 m31200| 2015-07-09T13:55:45.802-0400 I SHARDING [conn18] remotely refreshing metadata for db4.coll4 based on current shard version 0|0||559eb5a1ca4787b9985d1bb7, current metadata version is 1|1||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.804-0400 m31200| 2015-07-09T13:55:45.804-0400 I SHARDING [conn18] updating metadata for db4.coll4 from shard version 0|0||559eb5a1ca4787b9985d1bb7 to shard version 2|0||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.805-0400 m31200| 2015-07-09T13:55:45.804-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb5a1ca4787b9985d1bb7, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.805-0400 m31200| 2015-07-09T13:55:45.804-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.806-0400 m31200| 2015-07-09T13:55:45.805-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:45.805-0400-559eb5a1d5a107a5b9c0da8e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464545805), what: "split", ns: "db4.coll4", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5a1ca4787b9985d1bb7') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5a1ca4787b9985d1bb7') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.859-0400 m31200| 2015-07-09T13:55:45.859-0400 I SHARDING [conn18] distributed lock 'db4.coll4/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.862-0400 m30999| 2015-07-09T13:55:45.861-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 24 version: 2|5||559eb5a1ca4787b9985d1bb7 based on: 2|3||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.871-0400 m31100| 2015-07-09T13:55:45.870-0400 I INDEX [conn52] build index on: db4.coll4 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.872-0400 m31100| 2015-07-09T13:55:45.871-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.872-0400 m31200| 2015-07-09T13:55:45.871-0400 I INDEX [conn31] build index on: db4.coll4 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.872-0400 m31200| 2015-07-09T13:55:45.872-0400 I INDEX [conn31] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.877-0400 m31100| 2015-07-09T13:55:45.877-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.880-0400 m31200| 2015-07-09T13:55:45.879-0400 I INDEX [conn31] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.885-0400 m31101| 2015-07-09T13:55:45.884-0400 I INDEX [repl writer worker 12] build index on: db4.coll4 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.885-0400 m31101| 2015-07-09T13:55:45.884-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.891-0400 m31201| 2015-07-09T13:55:45.890-0400 I INDEX [repl writer worker 6] build index on: db4.coll4 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.891-0400 m31201| 2015-07-09T13:55:45.890-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.892-0400 m31202| 2015-07-09T13:55:45.890-0400 I INDEX [repl writer worker 0] build index on: db4.coll4 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.892-0400 m31202| 2015-07-09T13:55:45.890-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.893-0400 m31200| 2015-07-09T13:55:45.891-0400 I INDEX [conn31] build index on: db4.coll4 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.893-0400 m31200| 2015-07-09T13:55:45.891-0400 I INDEX [conn31] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.899-0400 m31101| 2015-07-09T13:55:45.899-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.900-0400 m31102| 2015-07-09T13:55:45.899-0400 I INDEX [repl writer worker 15] build index on: db4.coll4 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.900-0400 m31102| 2015-07-09T13:55:45.899-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.900-0400 m31201| 2015-07-09T13:55:45.899-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.902-0400 m31100| 2015-07-09T13:55:45.901-0400 I INDEX [conn52] build index on: db4.coll4 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.902-0400 m31100| 2015-07-09T13:55:45.902-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.902-0400 m31202| 2015-07-09T13:55:45.901-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.906-0400 m31200| 2015-07-09T13:55:45.906-0400 I INDEX [conn31] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.908-0400 m31100| 2015-07-09T13:55:45.908-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.911-0400 m31102| 2015-07-09T13:55:45.910-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.914-0400 m31202| 2015-07-09T13:55:45.914-0400 I INDEX [repl writer worker 4] build index on: db4.coll4 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.915-0400 m31202| 2015-07-09T13:55:45.914-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.920-0400 m31201| 2015-07-09T13:55:45.919-0400 I INDEX [repl writer worker 13] build index on: db4.coll4 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.920-0400 m31201| 2015-07-09T13:55:45.919-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.923-0400 m31102| 2015-07-09T13:55:45.922-0400 I INDEX [repl writer worker 14] build index on: db4.coll4 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db4.coll4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.923-0400 m31102| 2015-07-09T13:55:45.922-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.923-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.963-0400 m31201| 2015-07-09T13:55:45.951-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.976-0400 m31101| 2015-07-09T13:55:45.953-0400 I INDEX [repl writer worker 6] build index on: db4.coll4 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db4.coll4" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.976-0400 m31101| 2015-07-09T13:55:45.953-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:45.998-0400 m31202| 2015-07-09T13:55:45.988-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.022-0400 m31102| 2015-07-09T13:55:46.010-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.080-0400 m30999| 2015-07-09T13:55:46.064-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62692 #22 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.080-0400 m30998| 2015-07-09T13:55:46.071-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62694 #22 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.080-0400 m30999| 2015-07-09T13:55:46.074-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62691 #23 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.085-0400 m30999| 2015-07-09T13:55:46.084-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62693 #24 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.095-0400 m31101| 2015-07-09T13:55:46.093-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.095-0400 m30999| 2015-07-09T13:55:46.094-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62695 #25 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.101-0400 m30998| 2015-07-09T13:55:46.097-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62696 #23 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.144-0400 m30999| 2015-07-09T13:55:46.144-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62697 #26 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.151-0400 m30999| 2015-07-09T13:55:46.151-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62698 #27 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.161-0400 m30999| 2015-07-09T13:55:46.161-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62699 #28 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.162-0400 m30998| 2015-07-09T13:55:46.161-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62701 #24 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.166-0400 m30998| 2015-07-09T13:55:46.165-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62702 #25 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.172-0400 m30999| 2015-07-09T13:55:46.171-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62700 #29 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.172-0400 m30999| 2015-07-09T13:55:46.172-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62703 #30 (10 connections now open) 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.182-0400 m30999| 2015-07-09T13:55:46.175-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62704 #31 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.183-0400 m30998| 2015-07-09T13:55:46.182-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62705 #26 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.183-0400 m30998| 2015-07-09T13:55:46.182-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62706 #27 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.188-0400 m30998| 2015-07-09T13:55:46.186-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62707 #28 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.188-0400 m30998| 2015-07-09T13:55:46.187-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62708 #29 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.188-0400 m30998| 2015-07-09T13:55:46.188-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62709 #30 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.188-0400 m30998| 2015-07-09T13:55:46.188-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62710 #31 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.194-0400 setting random seed: 5184155893512
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.196-0400 setting random seed: 5057600317522
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.196-0400 setting random seed: 9762619337998
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.196-0400 setting random seed: 6907240776345
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.200-0400 setting random seed: 9109608144499
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.200-0400 setting random seed: 1775750652886
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.200-0400 setting random seed: 343290143646
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.201-0400 setting random seed: 9677446945570
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 m30998| 2015-07-09T13:55:46.202-0400 I SHARDING [conn22] ChunkManager: time to load chunks for db4.coll4: 0ms sequenceNumber: 6 version: 2|5||559eb5a1ca4787b9985d1bb7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 setting random seed: 108142830431
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 setting random seed: 5041236965917
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 setting random seed: 8069725618697
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 setting random seed: 4564786534756
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 setting random seed: 2967916685156
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 setting random seed: 7556551904417
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.453-0400 setting random seed: 3088576188310
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 setting random seed: 7878536605276
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 setting random seed: 9700021110475
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 setting random seed: 8850889937020
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 setting random seed: 8254879666492
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 m29000| 2015-07-09T13:55:46.247-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62712 #36 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 m29000| 2015-07-09T13:55:46.257-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62713 #37 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 setting random seed: 9995782086625
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 m29000| 2015-07-09T13:55:46.265-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62714 #38 (38 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.454-0400 m29000| 2015-07-09T13:55:46.309-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62715 #39 (39 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.718-0400 m31102| 2015-07-09T13:55:46.717-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62716 #10 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.720-0400 m31100| 2015-07-09T13:55:46.720-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62717 #53 (47 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.723-0400 m31101| 2015-07-09T13:55:46.723-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62718 #9 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.726-0400 m31201| 2015-07-09T13:55:46.725-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62719 #9 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:46.857-0400 m31101| 2015-07-09T13:55:46.856-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62720 #10 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.301-0400 m30999| 2015-07-09T13:55:52.301-0400 I NETWORK [conn24] end connection 127.0.0.1:62693 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.302-0400 m30998| 2015-07-09T13:55:52.302-0400 I NETWORK [conn22] end connection 127.0.0.1:62694 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.304-0400 m30999| 2015-07-09T13:55:52.303-0400 I NETWORK [conn23] end connection 127.0.0.1:62691 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.324-0400 m30999| 2015-07-09T13:55:52.323-0400 I NETWORK [conn22] end connection 127.0.0.1:62692 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.335-0400 m30998| 2015-07-09T13:55:52.331-0400 I NETWORK [conn23] end connection 127.0.0.1:62696 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.336-0400 m30999| 2015-07-09T13:55:52.335-0400 I NETWORK [conn26] end connection 127.0.0.1:62697 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.336-0400 m30999| 2015-07-09T13:55:52.335-0400 I NETWORK [conn29] end connection 127.0.0.1:62700 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.348-0400 m30999| 2015-07-09T13:55:52.347-0400 I NETWORK [conn27] end connection 127.0.0.1:62698 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.356-0400 m30999| 2015-07-09T13:55:52.356-0400 I NETWORK [conn28] end connection 127.0.0.1:62699 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.356-0400 m30998| 2015-07-09T13:55:52.356-0400 I NETWORK [conn24] end connection 127.0.0.1:62701 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.368-0400 m30999| 2015-07-09T13:55:52.368-0400 I NETWORK [conn31] end connection 127.0.0.1:62704 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.369-0400 m30999| 2015-07-09T13:55:52.368-0400 I NETWORK [conn30] end connection 127.0.0.1:62703 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.382-0400 m30999| 2015-07-09T13:55:52.374-0400 I NETWORK [conn25] end connection 127.0.0.1:62695 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.382-0400 m30998| 2015-07-09T13:55:52.373-0400 I NETWORK [conn27] end connection 127.0.0.1:62706 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.382-0400 m30998| 2015-07-09T13:55:52.382-0400 I NETWORK [conn25] end connection 127.0.0.1:62702 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.384-0400 m30998| 2015-07-09T13:55:52.384-0400 I NETWORK [conn30] end connection 127.0.0.1:62709 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.396-0400 m30998| 2015-07-09T13:55:52.395-0400 I NETWORK [conn26] end connection 127.0.0.1:62705 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.400-0400 m30998| 2015-07-09T13:55:52.397-0400 I NETWORK [conn29] end connection 127.0.0.1:62708 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.401-0400 m30998| 2015-07-09T13:55:52.401-0400 I NETWORK [conn28] end connection 127.0.0.1:62707 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.408-0400 m30998| 2015-07-09T13:55:52.408-0400 I NETWORK [conn31] end connection 127.0.0.1:62710 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.434-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.434-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.435-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.435-0400 jstests/concurrency/fsm_workloads/update_rename.js: Workload completed in 6511 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.435-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.435-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.435-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.435-0400 m30999| 2015-07-09T13:55:52.435-0400 I COMMAND [conn1] DROP: db4.coll4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.435-0400 m30999| 2015-07-09T13:55:52.435-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:52.435-0400-559eb5a8ca4787b9985d1bb9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464552435), what: "dropCollection.start", ns: "db4.coll4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.493-0400 m30999| 2015-07-09T13:55:52.492-0400 I SHARDING [conn1] distributed lock 'db4.coll4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5a8ca4787b9985d1bba
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.494-0400 m31100| 2015-07-09T13:55:52.493-0400 I COMMAND [conn15] CMD: drop db4.coll4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.496-0400 m31200| 2015-07-09T13:55:52.495-0400 I COMMAND [conn18] CMD: drop db4.coll4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.497-0400 m31102| 2015-07-09T13:55:52.497-0400 I COMMAND [repl writer worker 8] CMD: drop db4.coll4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.497-0400 m31101| 2015-07-09T13:55:52.497-0400 I COMMAND [repl writer worker 0] CMD: drop db4.coll4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.499-0400 m31202| 2015-07-09T13:55:52.498-0400 I COMMAND [repl writer worker 5] CMD: drop db4.coll4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.499-0400 m31201| 2015-07-09T13:55:52.499-0400 I COMMAND [repl writer worker 2] CMD: drop db4.coll4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.550-0400 m31100| 2015-07-09T13:55:52.550-0400 I SHARDING [conn15] remotely refreshing metadata for db4.coll4 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5a1ca4787b9985d1bb7, current metadata version is 2|3||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.551-0400 m31100| 2015-07-09T13:55:52.550-0400 W SHARDING [conn15] no chunks found when reloading db4.coll4, previous version was 0|0||559eb5a1ca4787b9985d1bb7, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.551-0400 m31100| 2015-07-09T13:55:52.550-0400 I SHARDING [conn15] dropping metadata for db4.coll4 at shard version 2|3||559eb5a1ca4787b9985d1bb7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.551-0400 m31200| 2015-07-09T13:55:52.551-0400 I SHARDING [conn18] remotely refreshing metadata for db4.coll4 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5a1ca4787b9985d1bb7, current metadata version is 2|5||559eb5a1ca4787b9985d1bb7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.551-0400 m31200| 2015-07-09T13:55:52.551-0400 W SHARDING [conn18] no chunks found when reloading db4.coll4, previous version was 0|0||559eb5a1ca4787b9985d1bb7, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.552-0400 m31200| 2015-07-09T13:55:52.551-0400 I SHARDING [conn18] dropping metadata for db4.coll4 at shard version 2|5||559eb5a1ca4787b9985d1bb7, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.552-0400 m30999| 2015-07-09T13:55:52.552-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:52.552-0400-559eb5a8ca4787b9985d1bbb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464552552), what: "dropCollection", ns: "db4.coll4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.606-0400 m30999| 2015-07-09T13:55:52.606-0400 I SHARDING [conn1] distributed lock 'db4.coll4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.662-0400 m30999| 2015-07-09T13:55:52.661-0400 I COMMAND [conn1] DROP DATABASE: db4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.662-0400 m30999| 2015-07-09T13:55:52.661-0400 I SHARDING [conn1] DBConfig::dropDatabase: db4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.662-0400 m30999| 2015-07-09T13:55:52.661-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:52.661-0400-559eb5a8ca4787b9985d1bbc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464552661), what: "dropDatabase.start", ns: "db4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.768-0400 m30999| 2015-07-09T13:55:52.767-0400 I SHARDING [conn1] DBConfig::dropDatabase: db4 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.768-0400 m31100| 2015-07-09T13:55:52.768-0400 I COMMAND [conn28] dropDatabase db4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.769-0400 m31100| 2015-07-09T13:55:52.768-0400 I COMMAND [conn28] dropDatabase db4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.769-0400 m30999| 2015-07-09T13:55:52.769-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:52.769-0400-559eb5a8ca4787b9985d1bbd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464552769), what: "dropDatabase", ns: "db4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.769-0400 m31101| 2015-07-09T13:55:52.769-0400 I COMMAND [repl writer worker 5] dropDatabase db4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.769-0400 m31101| 2015-07-09T13:55:52.769-0400 I COMMAND [repl writer worker 5] dropDatabase db4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.770-0400 m31102| 2015-07-09T13:55:52.769-0400 I COMMAND [repl writer worker 5] dropDatabase db4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.770-0400 m31102| 2015-07-09T13:55:52.769-0400 I COMMAND [repl writer worker 5] dropDatabase db4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.864-0400 m31100| 2015-07-09T13:55:52.863-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.867-0400 m31101| 2015-07-09T13:55:52.867-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.867-0400 m31102| 2015-07-09T13:55:52.867-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.894-0400 m31200| 2015-07-09T13:55:52.894-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.896-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.897-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.897-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.897-0400 jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.897-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.897-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.897-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.898-0400 m31201| 2015-07-09T13:55:52.897-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.898-0400 m31202| 2015-07-09T13:55:52.897-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.904-0400 m30999| 2015-07-09T13:55:52.904-0400 I SHARDING [conn1] distributed lock 'db5/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5a8ca4787b9985d1bbe
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.908-0400 m30999| 2015-07-09T13:55:52.908-0400 I SHARDING [conn1] Placing [db5] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.908-0400 m30999| 2015-07-09T13:55:52.908-0400 I SHARDING [conn1] Enabling sharding for database [db5] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.963-0400 m30999| 2015-07-09T13:55:52.962-0400 I SHARDING [conn1] distributed lock 'db5/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.993-0400 m31100| 2015-07-09T13:55:52.993-0400 I INDEX [conn30] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:52.994-0400 m31100| 2015-07-09T13:55:52.993-0400 I INDEX [conn30] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.000-0400 m31100| 2015-07-09T13:55:53.000-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.001-0400 m30999| 2015-07-09T13:55:53.000-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db5.coll5", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.003-0400 m30999| 2015-07-09T13:55:53.003-0400 I SHARDING [conn1] distributed lock 'db5.coll5/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5a9ca4787b9985d1bbf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.004-0400 m31102| 2015-07-09T13:55:53.004-0400 I INDEX [repl writer worker 9] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.004-0400 m31102| 2015-07-09T13:55:53.004-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.005-0400 m30999| 2015-07-09T13:55:53.004-0400 I SHARDING [conn1] enable sharding on: db5.coll5 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.005-0400 m30999| 2015-07-09T13:55:53.004-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.004-0400-559eb5a9ca4787b9985d1bc0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464553004), what: "shardCollection.start", ns: "db5.coll5", details: { shardKey: { _id: "hashed" }, collection: "db5.coll5", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.009-0400 m31101| 2015-07-09T13:55:53.008-0400 I INDEX [repl writer worker 15] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.009-0400 m31101| 2015-07-09T13:55:53.008-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.009-0400 m31102| 2015-07-09T13:55:53.008-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.016-0400 m31101| 2015-07-09T13:55:53.016-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.058-0400 m30999| 2015-07-09T13:55:53.057-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db5.coll5 using new epoch 559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.165-0400 m30999| 2015-07-09T13:55:53.164-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 25 version: 1|1||559eb5a9ca4787b9985d1bc1 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.221-0400 m30999| 2015-07-09T13:55:53.221-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 26 version: 1|1||559eb5a9ca4787b9985d1bc1 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.223-0400 m31100| 2015-07-09T13:55:53.222-0400 I SHARDING [conn52] remotely refreshing metadata for db5.coll5 with requested shard version 1|1||559eb5a9ca4787b9985d1bc1, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.225-0400 m31100| 2015-07-09T13:55:53.224-0400 I SHARDING [conn52] collection db5.coll5 was previously unsharded, new metadata loaded with shard version 1|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.225-0400 m31100| 2015-07-09T13:55:53.224-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb5a9ca4787b9985d1bc1, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.225-0400 m30999| 2015-07-09T13:55:53.225-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.225-0400-559eb5a9ca4787b9985d1bc2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464553225), what: "shardCollection", ns: "db5.coll5", details: { version: "1|1||559eb5a9ca4787b9985d1bc1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.279-0400 m30999| 2015-07-09T13:55:53.279-0400 I SHARDING [conn1] distributed lock 'db5.coll5/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.280-0400 m30999| 2015-07-09T13:55:53.280-0400 I SHARDING [conn1] moving chunk ns: db5.coll5 moving ( ns: db5.coll5, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.281-0400 m31100| 2015-07-09T13:55:53.280-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.282-0400 m31100| 2015-07-09T13:55:53.281-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5a9ca4787b9985d1bc1') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.286-0400 m31100| 2015-07-09T13:55:53.285-0400 I SHARDING [conn15] distributed lock 'db5.coll5/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5a9792e00bb672748f5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.286-0400 m31100| 2015-07-09T13:55:53.286-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.286-0400-559eb5a9792e00bb672748f6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464553286), what: "moveChunk.start", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.339-0400 m31100| 2015-07-09T13:55:53.339-0400 I SHARDING [conn15] remotely refreshing metadata for db5.coll5 based on current shard version 1|1||559eb5a9ca4787b9985d1bc1, current metadata version is 1|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.341-0400 m31100| 2015-07-09T13:55:53.340-0400 I SHARDING [conn15] metadata of collection db5.coll5 already up to date (shard version : 1|1||559eb5a9ca4787b9985d1bc1, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.341-0400 m31100| 2015-07-09T13:55:53.340-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.341-0400 m31100| 2015-07-09T13:55:53.341-0400 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.342-0400 m31200| 2015-07-09T13:55:53.341-0400 I SHARDING [conn16] remotely refreshing metadata for db5.coll5, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.343-0400 m31200| 2015-07-09T13:55:53.343-0400 I SHARDING [conn16] collection db5.coll5 was previously unsharded, new metadata loaded with shard version 0|0||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.344-0400 m31200| 2015-07-09T13:55:53.343-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb5a9ca4787b9985d1bc1, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.344-0400 m31200| 2015-07-09T13:55:53.343-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db5.coll5 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.346-0400 m31100| 2015-07-09T13:55:53.346-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.349-0400 m31100| 2015-07-09T13:55:53.349-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.354-0400 m31100| 2015-07-09T13:55:53.354-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.361-0400 m31200| 2015-07-09T13:55:53.361-0400 I INDEX [migrateThread] build index on: db5.coll5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.362-0400 m31200| 2015-07-09T13:55:53.361-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.363-0400 m31100| 2015-07-09T13:55:53.363-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.372-0400 m31200| 2015-07-09T13:55:53.371-0400 I INDEX [migrateThread] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.372-0400 m31200| 2015-07-09T13:55:53.372-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.381-0400 m31100| 2015-07-09T13:55:53.380-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.383-0400 m31200| 2015-07-09T13:55:53.383-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.384-0400 m31200| 2015-07-09T13:55:53.384-0400 I SHARDING [migrateThread] Deleter starting delete for: db5.coll5 from { _id: 0 } -> { _id: MaxKey }, with opId: 1081
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.385-0400 m31200| 2015-07-09T13:55:53.385-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db5.coll5 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.391-0400 m31201| 2015-07-09T13:55:53.391-0400 I INDEX [repl writer worker 8] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.392-0400 m31201| 2015-07-09T13:55:53.391-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.394-0400 m31202| 2015-07-09T13:55:53.394-0400 I INDEX [repl writer worker 9] build index on: db5.coll5 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.394-0400 m31202| 2015-07-09T13:55:53.394-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.398-0400 m31201| 2015-07-09T13:55:53.398-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.399-0400 m31202| 2015-07-09T13:55:53.399-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.400-0400 m31200| 2015-07-09T13:55:53.400-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.401-0400 m31200| 2015-07-09T13:55:53.400-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db5.coll5' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.415-0400 m31100| 2015-07-09T13:55:53.414-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.415-0400 m31100| 2015-07-09T13:55:53.414-0400 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.416-0400 m31100| 2015-07-09T13:55:53.415-0400 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.416-0400 m31100| 2015-07-09T13:55:53.415-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.425-0400 m31200| 2015-07-09T13:55:53.424-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db5.coll5' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.425-0400 m31200| 2015-07-09T13:55:53.425-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.425-0400-559eb5a9d5a107a5b9c0da8f", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464553425), what: "moveChunk.to", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 40, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.479-0400 m31100| 2015-07-09T13:55:53.478-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.479-0400 m31100| 2015-07-09T13:55:53.478-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb5a9ca4787b9985d1bc1 through { _id: MinKey } -> { _id: 0 } for collection 'db5.coll5'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.480-0400 m31100| 2015-07-09T13:55:53.479-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.479-0400-559eb5a9792e00bb672748f7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464553479), what: "moveChunk.commit", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.533-0400 m31100| 2015-07-09T13:55:53.533-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.533-0400 m31100| 2015-07-09T13:55:53.533-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.534-0400 m31100| 2015-07-09T13:55:53.533-0400 I SHARDING [conn15] Deleter starting delete for: db5.coll5 from { _id: 0 } -> { _id: MaxKey }, with opId: 2622
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.534-0400 m31100| 2015-07-09T13:55:53.533-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db5.coll5 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.534-0400 m31100| 2015-07-09T13:55:53.533-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.534-0400 m31100| 2015-07-09T13:55:53.534-0400 I SHARDING [conn15] distributed lock 'db5.coll5/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.535-0400 m31100| 2015-07-09T13:55:53.534-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.534-0400-559eb5a9792e00bb672748f8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464553534), what: "moveChunk.from", ns: "db5.coll5", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 118, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.590-0400 m31100| 2015-07-09T13:55:53.589-0400 I COMMAND [conn15] command db5.coll5 command: moveChunk { moveChunk: "db5.coll5", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5a9ca4787b9985d1bc1') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 308ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.592-0400 m30999| 2015-07-09T13:55:53.591-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 27 version: 2|1||559eb5a9ca4787b9985d1bc1 based on: 1|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.593-0400 m31100| 2015-07-09T13:55:53.592-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db5.coll5", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5a9ca4787b9985d1bc1') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.597-0400 m31100| 2015-07-09T13:55:53.596-0400 I SHARDING [conn15] distributed lock 'db5.coll5/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5a9792e00bb672748f9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.597-0400 m31100| 2015-07-09T13:55:53.596-0400 I SHARDING [conn15] remotely refreshing metadata for db5.coll5 based on current shard version 2|0||559eb5a9ca4787b9985d1bc1, current metadata version is 2|0||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.598-0400 m31100| 2015-07-09T13:55:53.598-0400 I SHARDING [conn15] updating metadata for db5.coll5 from shard version 2|0||559eb5a9ca4787b9985d1bc1 to shard version 2|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.598-0400 m31100| 2015-07-09T13:55:53.598-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb5a9ca4787b9985d1bc1, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.598-0400 m31100| 2015-07-09T13:55:53.598-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.600-0400 m31100| 2015-07-09T13:55:53.599-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.599-0400-559eb5a9792e00bb672748fa", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464553599), what: "split", ns: "db5.coll5", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5a9ca4787b9985d1bc1') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5a9ca4787b9985d1bc1') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.654-0400 m31100| 2015-07-09T13:55:53.653-0400 I SHARDING [conn15] distributed lock 'db5.coll5/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.656-0400 m30999| 2015-07-09T13:55:53.656-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 28 version: 2|3||559eb5a9ca4787b9985d1bc1 based on: 2|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.657-0400 m31200| 2015-07-09T13:55:53.656-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db5.coll5", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5a9ca4787b9985d1bc1') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.660-0400 m31200| 2015-07-09T13:55:53.660-0400 I SHARDING [conn18] distributed lock 'db5.coll5/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5a9d5a107a5b9c0da90
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.661-0400 m31200| 2015-07-09T13:55:53.660-0400 I SHARDING [conn18] remotely refreshing metadata for db5.coll5 based on current shard version 0|0||559eb5a9ca4787b9985d1bc1, current metadata version is 1|1||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.662-0400 m31200| 2015-07-09T13:55:53.662-0400 I SHARDING [conn18] updating metadata for db5.coll5 from shard version 0|0||559eb5a9ca4787b9985d1bc1 to shard version 2|0||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.663-0400 m31200| 2015-07-09T13:55:53.662-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb5a9ca4787b9985d1bc1, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.663-0400 m31200| 2015-07-09T13:55:53.662-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.664-0400 m31200| 2015-07-09T13:55:53.663-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:53.663-0400-559eb5a9d5a107a5b9c0da91", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464553663), what: "split", ns: "db5.coll5", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5a9ca4787b9985d1bc1') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5a9ca4787b9985d1bc1') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.718-0400 m31200| 2015-07-09T13:55:53.718-0400 I SHARDING [conn18] distributed lock 'db5.coll5/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.720-0400 m30999| 2015-07-09T13:55:53.720-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db5.coll5: 1ms sequenceNumber: 29 version: 2|5||559eb5a9ca4787b9985d1bc1 based on: 2|3||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.731-0400 m31100| 2015-07-09T13:55:53.731-0400 I INDEX [conn52] build index on: db5.coll5 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.732-0400 m31200| 2015-07-09T13:55:53.731-0400 I INDEX [conn31] build index on: db5.coll5 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.732-0400 m31100| 2015-07-09T13:55:53.731-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.732-0400 m31200| 2015-07-09T13:55:53.731-0400 I INDEX [conn31] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.738-0400 m31100| 2015-07-09T13:55:53.737-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.740-0400 m31200| 2015-07-09T13:55:53.740-0400 I INDEX [conn31] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.741-0400 m31100| 2015-07-09T13:55:53.741-0400 I COMMAND [conn15] CMD: dropIndexes db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.742-0400 m31200| 2015-07-09T13:55:53.741-0400 I COMMAND [conn18] CMD: dropIndexes db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.742-0400 m31102| 2015-07-09T13:55:53.742-0400 I INDEX [repl writer worker 12] build index on: db5.coll5 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.743-0400 m31102| 2015-07-09T13:55:53.742-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.746-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.747-0400 m31101| 2015-07-09T13:55:53.746-0400 I INDEX [repl writer worker 4] build index on: db5.coll5 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.748-0400 m31101| 2015-07-09T13:55:53.746-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.813-0400 m31202| 2015-07-09T13:55:53.772-0400 I INDEX [repl writer worker 14] build index on: db5.coll5 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.813-0400 m31202| 2015-07-09T13:55:53.772-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.822-0400 m31202| 2015-07-09T13:55:53.821-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.822-0400 m31201| 2015-07-09T13:55:53.822-0400 I INDEX [repl writer worker 5] build index on: db5.coll5 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db5.coll5" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.822-0400 m31201| 2015-07-09T13:55:53.822-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.836-0400 m31102| 2015-07-09T13:55:53.834-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.841-0400 m31101| 2015-07-09T13:55:53.840-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.872-0400 m31102| 2015-07-09T13:55:53.865-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.872-0400 m31101| 2015-07-09T13:55:53.872-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.872-0400 m31201| 2015-07-09T13:55:53.872-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.938-0400 m30998| 2015-07-09T13:55:53.936-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62721 #32 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.939-0400 m30999| 2015-07-09T13:55:53.939-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62722 #32 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.948-0400 m30998| 2015-07-09T13:55:53.946-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62724 #33 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.949-0400 m30999| 2015-07-09T13:55:53.949-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62723 #33 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.957-0400 m30998| 2015-07-09T13:55:53.957-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62726 #34 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.959-0400 m30999| 2015-07-09T13:55:53.959-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62725 #34 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.969-0400 m30999| 2015-07-09T13:55:53.969-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62727 #35 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:53.979-0400 m30999| 2015-07-09T13:55:53.979-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62728 #36 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.000-0400 m30998| 2015-07-09T13:55:53.999-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62729 #35 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.000-0400 m30999| 2015-07-09T13:55:54.000-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62730 #37 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.000-0400 m30998| 2015-07-09T13:55:54.000-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62731 #36 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.001-0400 m30999| 2015-07-09T13:55:54.001-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62732 #38 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.001-0400 m30999| 2015-07-09T13:55:54.001-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62733 #39 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.005-0400 m30998| 2015-07-09T13:55:54.005-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62734 #37 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.006-0400 m30999| 2015-07-09T13:55:54.005-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62737 #40 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.008-0400 m30998| 2015-07-09T13:55:54.007-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62735 #38 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.008-0400 m30998| 2015-07-09T13:55:54.008-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62736 #39 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.016-0400 m30998| 2015-07-09T13:55:54.015-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62738 #40 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.019-0400 m30999| 2015-07-09T13:55:54.019-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62739 #41 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.020-0400 m30998| 2015-07-09T13:55:54.019-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62740 #41 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.034-0400 setting random seed: 7094790004193
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.034-0400 setting random seed: 3637609393335
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.034-0400 setting random seed: 7301102261990
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.036-0400 setting random seed: 4356433553621
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.036-0400 setting random seed: 2135656010359
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.036-0400 setting random seed: 2705460586585
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.040-0400 setting random seed: 390546545386
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.041-0400 setting random seed: 9625774407759
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.041-0400 m30998| 2015-07-09T13:55:54.041-0400 I SHARDING [conn32] ChunkManager: time to load chunks for db5.coll5: 0ms sequenceNumber: 7 version: 2|5||559eb5a9ca4787b9985d1bc1 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.043-0400 setting random seed: 3384131300263
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.050-0400 m31202| 2015-07-09T13:55:54.049-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.050-0400 m31200| 2015-07-09T13:55:54.050-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62741 #34 (30 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.052-0400 m31201| 2015-07-09T13:55:54.051-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.064-0400 setting random seed: 3211183743551
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.067-0400 setting random seed: 8578769802115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.074-0400 setting random seed: 3970582825131
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.074-0400 setting random seed: 7966818371787
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.075-0400 setting random seed: 676099909469
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.094-0400 setting random seed: 8316436982713
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.098-0400 setting random seed: 367325376719
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.099-0400 setting random seed: 3915791362524
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.104-0400 setting random seed: 2726486148312
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.120-0400 setting random seed: 3585573062300
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.158-0400 m31200| 2015-07-09T13:55:54.155-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62742 #35 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.159-0400 setting random seed: 365202999673
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.360-0400 m31200| 2015-07-09T13:55:54.359-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62743 #36 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.363-0400 m31100| 2015-07-09T13:55:54.363-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62744 #54 (48 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.374-0400 m31100| 2015-07-09T13:55:54.373-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62745 #55 (49 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.379-0400 m31200| 2015-07-09T13:55:54.379-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62748 #37 (33 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.381-0400 m31200| 2015-07-09T13:55:54.380-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62749 #38 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.382-0400 m31100| 2015-07-09T13:55:54.382-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62746 #56 (50 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.383-0400 m31100| 2015-07-09T13:55:54.382-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62747 #57 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.384-0400 m31100| 2015-07-09T13:55:54.383-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62751 #58 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.384-0400 m31100| 2015-07-09T13:55:54.384-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62752 #59 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.386-0400 m31200| 2015-07-09T13:55:54.385-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62750 #39 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.390-0400 m31100| 2015-07-09T13:55:54.390-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62755 #60 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.396-0400 m31200| 2015-07-09T13:55:54.395-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62753 #40 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.397-0400 m31200| 2015-07-09T13:55:54.396-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62754 #41 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.486-0400 m30999| 2015-07-09T13:55:54.486-0400 I NETWORK [conn34] end connection 127.0.0.1:62725 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.519-0400 m30998| 2015-07-09T13:55:54.519-0400 I NETWORK [conn32] end connection 127.0.0.1:62721 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.527-0400 m30999| 2015-07-09T13:55:54.527-0400 I NETWORK [conn33] end connection 127.0.0.1:62723 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.540-0400 m30999| 2015-07-09T13:55:54.540-0400 I NETWORK [conn32] end connection 127.0.0.1:62722 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.544-0400 m30998| 2015-07-09T13:55:54.544-0400 I NETWORK [conn40] end connection 127.0.0.1:62738 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.560-0400 m30998| 2015-07-09T13:55:54.560-0400 I NETWORK [conn35] end connection 127.0.0.1:62729 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.572-0400 m30999| 2015-07-09T13:55:54.572-0400 I NETWORK [conn37] end connection 127.0.0.1:62730 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.598-0400 m30999| 2015-07-09T13:55:54.598-0400 I NETWORK [conn36] end connection 127.0.0.1:62728 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.601-0400 m30998| 2015-07-09T13:55:54.598-0400 I NETWORK [conn33] end connection 127.0.0.1:62724 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.609-0400 m30999| 2015-07-09T13:55:54.608-0400 I NETWORK [conn39] end connection 127.0.0.1:62733 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.637-0400 m30999| 2015-07-09T13:55:54.636-0400 I NETWORK [conn35] end connection 127.0.0.1:62727 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.643-0400 m30998| 2015-07-09T13:55:54.643-0400 I NETWORK [conn36] end connection 127.0.0.1:62731 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.658-0400 m30999| 2015-07-09T13:55:54.657-0400 I NETWORK [conn41] end connection 127.0.0.1:62739 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.671-0400 m30998| 2015-07-09T13:55:54.671-0400 I NETWORK [conn38] end connection 127.0.0.1:62735 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.679-0400 m30998| 2015-07-09T13:55:54.679-0400 I NETWORK [conn37] end connection 127.0.0.1:62734 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.687-0400 m30998| 2015-07-09T13:55:54.687-0400 I NETWORK [conn39] end connection 127.0.0.1:62736 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.704-0400 m30998| 2015-07-09T13:55:54.703-0400 I NETWORK [conn34] end connection 127.0.0.1:62726 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.717-0400 m30999| 2015-07-09T13:55:54.717-0400 I NETWORK [conn38] end connection 127.0.0.1:62732 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.734-0400 m30999| 2015-07-09T13:55:54.734-0400 I NETWORK [conn40] end connection 127.0.0.1:62737 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.753-0400 m30998| 2015-07-09T13:55:54.752-0400 I NETWORK [conn41] end connection 127.0.0.1:62740 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.782-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js: Workload completed in 1036 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 m30999| 2015-07-09T13:55:54.783-0400 I COMMAND [conn1] DROP: db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.783-0400 m30999| 2015-07-09T13:55:54.783-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:54.783-0400-559eb5aaca4787b9985d1bc3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464554783), what: "dropCollection.start", ns: "db5.coll5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.840-0400 m30999| 2015-07-09T13:55:54.839-0400 I SHARDING [conn1] distributed lock 'db5.coll5/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5aaca4787b9985d1bc4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.841-0400 m31100| 2015-07-09T13:55:54.841-0400 I COMMAND [conn37] CMD: drop db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.843-0400 m31200| 2015-07-09T13:55:54.843-0400 I COMMAND [conn18] CMD: drop db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.845-0400 m31101| 2015-07-09T13:55:54.845-0400 I COMMAND [repl writer worker 12] CMD: drop db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.845-0400 m31102| 2015-07-09T13:55:54.845-0400 I COMMAND [repl writer worker 13] CMD: drop db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.846-0400 m31201| 2015-07-09T13:55:54.846-0400 I COMMAND [repl writer worker 15] CMD: drop db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.846-0400 m31202| 2015-07-09T13:55:54.846-0400 I COMMAND [repl writer worker 2] CMD: drop db5.coll5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.899-0400 m31100| 2015-07-09T13:55:54.899-0400 I SHARDING [conn37] remotely refreshing metadata for db5.coll5 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5a9ca4787b9985d1bc1, current metadata version is 2|3||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.900-0400 m31100| 2015-07-09T13:55:54.900-0400 W SHARDING [conn37] no chunks found when reloading db5.coll5, previous version was 0|0||559eb5a9ca4787b9985d1bc1, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.900-0400 m31100| 2015-07-09T13:55:54.900-0400 I SHARDING [conn37] dropping metadata for db5.coll5 at shard version 2|3||559eb5a9ca4787b9985d1bc1, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.901-0400 m31200| 2015-07-09T13:55:54.901-0400 I SHARDING [conn18] remotely refreshing metadata for db5.coll5 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5a9ca4787b9985d1bc1, current metadata version is 2|5||559eb5a9ca4787b9985d1bc1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.903-0400 m31200| 2015-07-09T13:55:54.902-0400 W SHARDING [conn18] no chunks found when reloading db5.coll5, previous version was 0|0||559eb5a9ca4787b9985d1bc1, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.903-0400 m31200| 2015-07-09T13:55:54.902-0400 I SHARDING [conn18] dropping metadata for db5.coll5 at shard version 2|5||559eb5a9ca4787b9985d1bc1, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.904-0400 m30999| 2015-07-09T13:55:54.904-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:54.904-0400-559eb5aaca4787b9985d1bc5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464554904), what: "dropCollection", ns: "db5.coll5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:54.958-0400 m30999| 2015-07-09T13:55:54.958-0400 I SHARDING [conn1] distributed lock 'db5.coll5/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.015-0400 m30999| 2015-07-09T13:55:55.014-0400 I COMMAND [conn1] DROP DATABASE: db5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.015-0400 m30999| 2015-07-09T13:55:55.014-0400 I SHARDING [conn1] DBConfig::dropDatabase: db5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.015-0400 m30999| 2015-07-09T13:55:55.015-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.015-0400-559eb5abca4787b9985d1bc6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464555015), what: "dropDatabase.start", ns: "db5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.121-0400 m30999| 2015-07-09T13:55:55.120-0400 I SHARDING [conn1] DBConfig::dropDatabase: db5 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.121-0400 m31100| 2015-07-09T13:55:55.121-0400 I COMMAND [conn28] dropDatabase db5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.122-0400 m31100| 2015-07-09T13:55:55.121-0400 I COMMAND [conn28] dropDatabase db5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.122-0400 m30999| 2015-07-09T13:55:55.122-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.122-0400-559eb5abca4787b9985d1bc7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464555122), what: "dropDatabase", ns: "db5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.122-0400 m31101| 2015-07-09T13:55:55.122-0400 I COMMAND [repl writer worker 15] dropDatabase db5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.123-0400 m31101| 2015-07-09T13:55:55.122-0400 I COMMAND [repl writer worker 15] dropDatabase db5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.123-0400 m31102| 2015-07-09T13:55:55.122-0400 I COMMAND [repl writer worker 0] dropDatabase db5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.123-0400 m31102| 2015-07-09T13:55:55.122-0400 I COMMAND [repl writer worker 0] dropDatabase db5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.214-0400 m31100| 2015-07-09T13:55:55.213-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.217-0400 m31102| 2015-07-09T13:55:55.217-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.217-0400 m31101| 2015-07-09T13:55:55.217-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.258-0400 m31200| 2015-07-09T13:55:55.258-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.261-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.261-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.262-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.262-0400 jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.262-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.262-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.262-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.262-0400 m31202| 2015-07-09T13:55:55.261-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:55.262-0400 m31201| 2015-07-09T13:55:55.261-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.271-0400 m30999| 2015-07-09T13:55:55.271-0400 I SHARDING [conn1] distributed lock 'db6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5abca4787b9985d1bc8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.275-0400 m30999| 2015-07-09T13:55:55.274-0400 I SHARDING [conn1] Placing [db6] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.275-0400 m30999| 2015-07-09T13:55:55.275-0400 I SHARDING [conn1] Enabling sharding for database [db6] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.330-0400 m30999| 2015-07-09T13:55:55.329-0400 I SHARDING [conn1] distributed lock 'db6/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.354-0400 m31100| 2015-07-09T13:55:55.353-0400 I INDEX [conn29] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.354-0400 m31100| 2015-07-09T13:55:55.353-0400 I INDEX [conn29] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.362-0400 m31100| 2015-07-09T13:55:55.362-0400 I INDEX [conn29] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.364-0400 m30999| 2015-07-09T13:55:55.363-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db6.coll6", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.366-0400 m30999| 2015-07-09T13:55:55.366-0400 I SHARDING [conn1] distributed lock 'db6.coll6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5abca4787b9985d1bc9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.367-0400 m30999| 2015-07-09T13:55:55.367-0400 I SHARDING [conn1] enable sharding on: db6.coll6 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.368-0400 m30999| 2015-07-09T13:55:55.367-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.367-0400-559eb5abca4787b9985d1bca", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464555367), what: "shardCollection.start", ns: "db6.coll6", details: { shardKey: { _id: "hashed" }, collection: "db6.coll6", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.375-0400 m31102| 2015-07-09T13:55:55.375-0400 I INDEX [repl writer worker 9] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.376-0400 m31102| 2015-07-09T13:55:55.375-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.377-0400 m31101| 2015-07-09T13:55:55.376-0400 I INDEX [repl writer worker 3] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.377-0400 m31101| 2015-07-09T13:55:55.377-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.383-0400 m31102| 2015-07-09T13:55:55.382-0400 I INDEX [repl writer worker 9] build index done. 
scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.386-0400 m31101| 2015-07-09T13:55:55.386-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.420-0400 m30999| 2015-07-09T13:55:55.419-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db6.coll6 using new epoch 559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.527-0400 m30999| 2015-07-09T13:55:55.527-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 1ms sequenceNumber: 30 version: 1|1||559eb5abca4787b9985d1bcb based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.540-0400 m29000| 2015-07-09T13:55:55.540-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62756 #40 (40 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.587-0400 m30999| 2015-07-09T13:55:55.586-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 31 version: 1|1||559eb5abca4787b9985d1bcb based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.589-0400 m31100| 2015-07-09T13:55:55.589-0400 I SHARDING [conn59] remotely refreshing metadata for db6.coll6 with requested shard version 1|1||559eb5abca4787b9985d1bcb, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.591-0400 m31100| 2015-07-09T13:55:55.590-0400 I SHARDING [conn59] collection db6.coll6 was previously unsharded, new metadata loaded with shard version 1|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.591-0400 m31100| 2015-07-09T13:55:55.591-0400 I SHARDING [conn59] collection version was loaded at version 1|1||559eb5abca4787b9985d1bcb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.591-0400 m30999| 2015-07-09T13:55:55.591-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.591-0400-559eb5abca4787b9985d1bcc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464555591), what: "shardCollection", ns: "db6.coll6", details: { version: "1|1||559eb5abca4787b9985d1bcb" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.648-0400 m30999| 2015-07-09T13:55:55.647-0400 I SHARDING [conn1] distributed lock 'db6.coll6/bs-osx108-8:30999:1436464534:16807' unlocked. 
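
Setup for the next workload follows the recipe every db<N> in this suite gets: sharding is enabled on db6 (placed on test-rs0), the _id_hashed index is built on the primary and replicated to m31101/m31102, and db6.coll6 is sharded on { _id: "hashed" }, which the log shows creates two initial chunks under epoch 559eb5abca4787b9985d1bcb. Sketched as the equivalent admin commands, again assuming mongos at port 30999:

    // Hedged sketch of the admin commands behind the log entries above.
    var admin = new Mongo("localhost:30999").getDB("admin");  // host assumed
    assert.commandWorked(admin.runCommand({enableSharding: "db6"}));
    // A hashed shard key lets mongos pre-split; the log shows 2 initial chunks.
    assert.commandWorked(admin.runCommand({
        shardCollection: "db6.coll6",
        key: {_id: "hashed"}
    }));
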
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.649-0400 m30999| 2015-07-09T13:55:55.649-0400 I SHARDING [conn1] moving chunk ns: db6.coll6 moving ( ns: db6.coll6, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.649-0400 m31100| 2015-07-09T13:55:55.649-0400 I SHARDING [conn37] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.651-0400 m31100| 2015-07-09T13:55:55.650-0400 I SHARDING [conn37] received moveChunk request: { moveChunk: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5abca4787b9985d1bcb') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.655-0400 m31100| 2015-07-09T13:55:55.655-0400 I SHARDING [conn37] distributed lock 'db6.coll6/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5ab792e00bb672748fc [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.656-0400 m31100| 2015-07-09T13:55:55.655-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.655-0400-559eb5ab792e00bb672748fd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464555655), what: "moveChunk.start", ns: "db6.coll6", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.708-0400 m31100| 2015-07-09T13:55:55.708-0400 I SHARDING [conn37] remotely refreshing metadata for db6.coll6 based on current shard version 1|1||559eb5abca4787b9985d1bcb, current metadata version is 1|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.710-0400 m31100| 2015-07-09T13:55:55.710-0400 I SHARDING [conn37] metadata of collection db6.coll6 already up to date (shard version : 1|1||559eb5abca4787b9985d1bcb, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.710-0400 m31100| 2015-07-09T13:55:55.710-0400 I SHARDING [conn37] moveChunk request accepted at version 1|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.711-0400 m31100| 2015-07-09T13:55:55.711-0400 I SHARDING [conn37] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.712-0400 m31200| 2015-07-09T13:55:55.711-0400 I SHARDING [conn16] remotely refreshing metadata for db6.coll6, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.713-0400 m31200| 2015-07-09T13:55:55.713-0400 I SHARDING [conn16] collection db6.coll6 was previously unsharded, new metadata loaded with shard version 0|0||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.714-0400 m31200| 2015-07-09T13:55:55.713-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb5abca4787b9985d1bcb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.714-0400 m31200| 2015-07-09T13:55:55.713-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db6.coll6 from 
test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.716-0400 m31100| 2015-07-09T13:55:55.716-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.720-0400 m31100| 2015-07-09T13:55:55.719-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.725-0400 m31100| 2015-07-09T13:55:55.724-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.732-0400 m31200| 2015-07-09T13:55:55.732-0400 I INDEX [migrateThread] build index on: db6.coll6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.733-0400 m31200| 2015-07-09T13:55:55.732-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.734-0400 m31100| 2015-07-09T13:55:55.733-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.746-0400 m31200| 2015-07-09T13:55:55.746-0400 I INDEX [migrateThread] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.746-0400 m31200| 2015-07-09T13:55:55.746-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.751-0400 m31100| 2015-07-09T13:55:55.750-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.754-0400 m31200| 2015-07-09T13:55:55.754-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.755-0400 m31200| 2015-07-09T13:55:55.754-0400 I SHARDING [migrateThread] Deleter starting delete for: db6.coll6 from { _id: 0 } -> { _id: MaxKey }, with opId: 2860 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.756-0400 m31200| 2015-07-09T13:55:55.756-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db6.coll6 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.766-0400 m31202| 2015-07-09T13:55:55.766-0400 I INDEX [repl writer worker 11] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.766-0400 m31202| 2015-07-09T13:55:55.766-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.766-0400 m31201| 2015-07-09T13:55:55.766-0400 I INDEX [repl writer worker 12] build index on: db6.coll6 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db6.coll6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.767-0400 m31201| 2015-07-09T13:55:55.766-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.770-0400 m31202| 2015-07-09T13:55:55.769-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.772-0400 m31200| 2015-07-09T13:55:55.771-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.772-0400 m31200| 2015-07-09T13:55:55.771-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db6.coll6' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.775-0400 m31201| 2015-07-09T13:55:55.775-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.785-0400 m31100| 2015-07-09T13:55:55.784-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.785-0400 m31100| 2015-07-09T13:55:55.784-0400 I SHARDING [conn37] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.785-0400 m31100| 2015-07-09T13:55:55.785-0400 I SHARDING [conn37] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.786-0400 m31100| 2015-07-09T13:55:55.785-0400 I SHARDING [conn37] moveChunk setting version to: 2|0||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.796-0400 m31200| 2015-07-09T13:55:55.795-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db6.coll6' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.796-0400 m31200| 2015-07-09T13:55:55.795-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.795-0400-559eb5abd5a107a5b9c0da92", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464555795), what: "moveChunk.to", ns: "db6.coll6", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 40, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.850-0400 m31100| 2015-07-09T13:55:55.849-0400 I SHARDING [conn37] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.850-0400 m31100| 2015-07-09T13:55:55.850-0400 I SHARDING [conn37] moveChunk updating self version to: 2|1||559eb5abca4787b9985d1bcb through { _id: MinKey } -> { _id: 0 } for collection 'db6.coll6' [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.851-0400 m31100| 2015-07-09T13:55:55.851-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.851-0400-559eb5ab792e00bb672748fe", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464555851), what: "moveChunk.commit", ns: "db6.coll6", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.905-0400 m31100| 2015-07-09T13:55:55.904-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.905-0400 m31100| 2015-07-09T13:55:55.905-0400 I SHARDING [conn37] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.905-0400 m31100| 2015-07-09T13:55:55.905-0400 I SHARDING [conn37] Deleter starting delete for: db6.coll6 from { _id: 0 } -> { _id: MaxKey }, with opId: 4302 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.905-0400 m31100| 
2015-07-09T13:55:55.905-0400 I SHARDING [conn37] rangeDeleter deleted 0 documents for db6.coll6 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.905-0400 m31100| 2015-07-09T13:55:55.905-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.906-0400 m31100| 2015-07-09T13:55:55.906-0400 I SHARDING [conn37] distributed lock 'db6.coll6/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.907-0400 m31100| 2015-07-09T13:55:55.906-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.906-0400-559eb5ab792e00bb672748ff", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464555906), what: "moveChunk.from", ns: "db6.coll6", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 3, step 4 of 6: 70, step 5 of 6: 120, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.960-0400 m31100| 2015-07-09T13:55:55.959-0400 I COMMAND [conn37] command db6.coll6 command: moveChunk { moveChunk: "db6.coll6", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5abca4787b9985d1bcb') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 310ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.962-0400 m30999| 2015-07-09T13:55:55.962-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 1ms sequenceNumber: 32 version: 2|1||559eb5abca4787b9985d1bcb based on: 1|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.963-0400 m31100| 2015-07-09T13:55:55.963-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db6.coll6", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5abca4787b9985d1bcb') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.967-0400 m31100| 2015-07-09T13:55:55.967-0400 I SHARDING [conn37] distributed lock 'db6.coll6/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5ab792e00bb67274900 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.968-0400 m31100| 2015-07-09T13:55:55.967-0400 I SHARDING [conn37] remotely refreshing metadata for db6.coll6 based on current shard version 2|0||559eb5abca4787b9985d1bcb, current metadata version is 2|0||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.969-0400 m31100| 2015-07-09T13:55:55.969-0400 I SHARDING [conn37] updating metadata for db6.coll6 from shard version 2|0||559eb5abca4787b9985d1bcb to shard version 2|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.969-0400 m31100| 2015-07-09T13:55:55.969-0400 I SHARDING [conn37] collection version was loaded at version 
2|1||559eb5abca4787b9985d1bcb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.969-0400 m31100| 2015-07-09T13:55:55.969-0400 I SHARDING [conn37] splitChunk accepted at version 2|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:55.971-0400 m31100| 2015-07-09T13:55:55.970-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:55.970-0400-559eb5ab792e00bb67274901", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464555970), what: "split", ns: "db6.coll6", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5abca4787b9985d1bcb') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5abca4787b9985d1bcb') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.025-0400 m31100| 2015-07-09T13:55:56.025-0400 I SHARDING [conn37] distributed lock 'db6.coll6/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.028-0400 m30999| 2015-07-09T13:55:56.027-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 33 version: 2|3||559eb5abca4787b9985d1bcb based on: 2|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.029-0400 m31200| 2015-07-09T13:55:56.028-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db6.coll6", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5abca4787b9985d1bcb') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.032-0400 m31200| 2015-07-09T13:55:56.032-0400 I SHARDING [conn18] distributed lock 'db6.coll6/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5acd5a107a5b9c0da93 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.033-0400 m31200| 2015-07-09T13:55:56.032-0400 I SHARDING [conn18] remotely refreshing metadata for db6.coll6 based on current shard version 0|0||559eb5abca4787b9985d1bcb, current metadata version is 1|1||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.034-0400 m31200| 2015-07-09T13:55:56.034-0400 I SHARDING [conn18] updating metadata for db6.coll6 from shard version 0|0||559eb5abca4787b9985d1bcb to shard version 2|0||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.034-0400 m31200| 2015-07-09T13:55:56.034-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb5abca4787b9985d1bcb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.034-0400 m31200| 2015-07-09T13:55:56.034-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.036-0400 m31200| 2015-07-09T13:55:56.035-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:55:56.035-0400-559eb5acd5a107a5b9c0da94", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464556035), what: "split", ns: "db6.coll6", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5abca4787b9985d1bcb') }, right: { min: { _id: 
4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5abca4787b9985d1bcb') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.089-0400 m31200| 2015-07-09T13:55:56.089-0400 I SHARDING [conn18] distributed lock 'db6.coll6/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.092-0400 m30999| 2015-07-09T13:55:56.091-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 34 version: 2|5||559eb5abca4787b9985d1bcb based on: 2|3||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.422-0400 m31100| 2015-07-09T13:55:56.421-0400 I COMMAND [conn29] command db6.$cmd command: insert { insert: "coll6", documents: 484, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eb5abca4787b9985d1bcb') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 491, w: 491 } }, Database: { acquireCount: { w: 491 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 484 } }, oplog: { acquireCount: { w: 484 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.424-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.479-0400 m30998| 2015-07-09T13:55:56.479-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62758 #42 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.929-0400 m30999| 2015-07-09T13:55:56.479-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62757 #42 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 m30998| 2015-07-09T13:55:56.483-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62760 #43 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 m30999| 2015-07-09T13:55:56.483-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62759 #43 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 m30999| 2015-07-09T13:55:56.493-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62761 #44 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 setting random seed: 2217107331380 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 setting random seed: 2922935858368 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 setting random seed: 4788252818398 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 setting random seed: 5782994478940 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.930-0400 setting random seed: 6165156289935 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m30998| 2015-07-09T13:55:56.504-0400 I SHARDING [conn43] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 8 version: 2|5||559eb5abca4787b9985d1bcb based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31200| 2015-07-09T13:55:56.523-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31100| 2015-07-09T13:55:56.527-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31100| 2015-07-09T13:55:56.528-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_0 
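
The block above is the initial balance plus data load: the { _id: 0 } -> MaxKey chunk is migrated from test-rs0 to test-rs1 (ready, steady, commit, with waitForDelete so the donor's range deleter runs inline), then each shard's remaining chunk is split at +/-4611686018427387902 (roughly +/-2^62, the quarter points of the hashed key space), leaving two chunks per shard at version 2|5. The setup data is then bulk-inserted (a 484-document batch is logged), and five workload threads connect through both mongos routers and begin their mapReduce passes; the tmp.mr.coll6_N drops are mongod clearing scratch collections before each pass. The chunk operations, sketched with values copied from the log (the harness's actual helper calls may differ):

    // Hedged sketch of the chunk layout just established for db6.coll6.
    var admin = new Mongo("localhost:30999").getDB("admin");  // host assumed
    assert.commandWorked(admin.runCommand({
        moveChunk: "db6.coll6", find: {_id: 0}, to: "test-rs1",
        _waitForDelete: true  // matches waitForDelete: true in the logged request
    }));
    // NumberLong keeps the split points exact; they exceed double precision.
    assert.commandWorked(admin.runCommand(
        {split: "db6.coll6", middle: {_id: NumberLong("-4611686018427387902")}}));
    assert.commandWorked(admin.runCommand(
        {split: "db6.coll6", middle: {_id: NumberLong("4611686018427387902")}}));
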
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31100| 2015-07-09T13:55:56.546-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31100| 2015-07-09T13:55:56.551-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31200| 2015-07-09T13:55:56.584-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31100| 2015-07-09T13:55:56.586-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.931-0400 m31200| 2015-07-09T13:55:56.587-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.932-0400 m31200| 2015-07-09T13:55:56.588-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:56.932-0400 m31200| 2015-07-09T13:55:56.588-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.251-0400 m31100| 2015-07-09T13:55:57.251-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.256-0400 m31100| 2015-07-09T13:55:57.255-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.256-0400 m31100| 2015-07-09T13:55:57.256-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.258-0400 m31100| 2015-07-09T13:55:57.258-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.260-0400 m31100| 2015-07-09T13:55:57.259-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464556_0 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.260-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.260-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.260-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.261-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:210 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 322 } }, Database: { acquireCount: { r: 26, w: 66, R: 22, W: 11 }, acquireWaitCount: { w: 11, R: 8, W: 7 }, timeAcquiringMicros: { w: 73329, R: 114658, W: 34003 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 751ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.261-0400 m31100| 2015-07-09T13:55:57.260-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.268-0400 m31100| 2015-07-09T13:55:57.268-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.269-0400 m31100| 2015-07-09T13:55:57.269-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.270-0400 m31100| 2015-07-09T13:55:57.269-0400 I COMMAND 
[conn57] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.286-0400 m31100| 2015-07-09T13:55:57.286-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.286-0400 m31100| 2015-07-09T13:55:57.286-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.287-0400 m31100| 2015-07-09T13:55:57.287-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.288-0400 m31100| 2015-07-09T13:55:57.288-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.289-0400 m31100| 2015-07-09T13:55:57.289-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464556_2 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.289-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.290-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.290-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.291-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:210 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 14264, W: 509 } }, Database: { acquireCount: { r: 26, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 3, w: 9, R: 5, W: 8 }, timeAcquiringMicros: { r: 15909, w: 43867, R: 69737, W: 89570 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 772ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.292-0400 m31100| 2015-07-09T13:55:57.290-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464556_1 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.292-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.292-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.293-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.293-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:210 locks:{ Global: { acquireCount: { r: 177, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 5430, w: 17086, W: 942 } }, Database: { acquireCount: { r: 26, w: 66, R: 24, W: 11 }, acquireWaitCount: { r: 2, w: 9, R: 6, W: 9 }, timeAcquiringMicros: { r: 1066, w: 31735, R: 77384, W: 88621 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 785ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.294-0400 m31100| 2015-07-09T13:55:57.292-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.303-0400 m31100| 
2015-07-09T13:55:57.303-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.304-0400 m31100| 2015-07-09T13:55:57.303-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.305-0400 m31100| 2015-07-09T13:55:57.305-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.306-0400 m31100| 2015-07-09T13:55:57.306-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464556_0 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.307-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.307-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.307-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.307-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:210 locks:{ Global: { acquireCount: { r: 180, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 22609, w: 9642, W: 1211 } }, Database: { acquireCount: { r: 26, w: 67, R: 25, W: 11 }, acquireWaitCount: { r: 4, w: 6, R: 6, W: 7 }, timeAcquiringMicros: { r: 33158, w: 66758, R: 23568, W: 114838 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 777ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.308-0400 m31100| 2015-07-09T13:55:57.308-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.313-0400 m31102| 2015-07-09T13:55:57.312-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.315-0400 m31100| 2015-07-09T13:55:57.314-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.315-0400 m31100| 2015-07-09T13:55:57.315-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.316-0400 m31100| 2015-07-09T13:55:57.315-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.316-0400 m31100| 2015-07-09T13:55:57.316-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464556_1 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.316-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.316-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.316-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.317-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:210 locks:{ Global: { acquireCount: { r: 186, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 19983, w: 22512 } }, Database: { acquireCount: { r: 26, w: 67, R: 28, W: 
11 }, acquireWaitCount: { r: 6, w: 8, R: 8, W: 5 }, timeAcquiringMicros: { r: 15416, w: 57163, R: 40509, W: 98294 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 807ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.317-0400 m31101| 2015-07-09T13:55:57.317-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.330-0400 m31102| 2015-07-09T13:55:57.330-0400 I COMMAND [repl writer worker 11] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.332-0400 m31200| 2015-07-09T13:55:57.331-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.334-0400 m31101| 2015-07-09T13:55:57.334-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.335-0400 m31200| 2015-07-09T13:55:57.334-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.335-0400 m31200| 2015-07-09T13:55:57.334-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.337-0400 m31200| 2015-07-09T13:55:57.337-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.337-0400 m31200| 2015-07-09T13:55:57.337-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.348-0400 m31200| 2015-07-09T13:55:57.348-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.349-0400 m31200| 2015-07-09T13:55:57.348-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.351-0400 m31200| 2015-07-09T13:55:57.350-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464556_1 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.351-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.351-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.352-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.352-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:210 locks:{ Global: { acquireCount: { r: 177, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 11310, W: 335 } }, Database: { acquireCount: { r: 26, w: 66, R: 24, W: 11 }, acquireWaitCount: { w: 15, R: 6, W: 5 }, timeAcquiringMicros: { w: 148649, R: 123842, W: 1967 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 844ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.353-0400 m31200| 2015-07-09T13:55:57.351-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.353-0400 m31100| 2015-07-09T13:55:57.352-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:57.356-0400 m31200| 2015-07-09T13:55:57.355-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464556_0 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.356-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.356-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.356-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.357-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:210 locks:{ Global: { acquireCount: { r: 183, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3449, W: 819 } }, Database: { acquireCount: { r: 26, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 3, w: 6, R: 7, W: 9 }, timeAcquiringMicros: { r: 18760, w: 49141, R: 15334, W: 96567 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 826ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.358-0400 m31100| 2015-07-09T13:55:57.358-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.362-0400 m31200| 2015-07-09T13:55:57.361-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.368-0400 m31200| 2015-07-09T13:55:57.368-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.368-0400 m31200| 2015-07-09T13:55:57.368-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.369-0400 m31200| 2015-07-09T13:55:57.369-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.370-0400 m31200| 2015-07-09T13:55:57.369-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464556_2 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.370-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.370-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.370-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.372-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:19 reslen:210 locks:{ Global: { acquireCount: { r: 191, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 11743, w: 3417, W: 799 } }, Database: { acquireCount: { r: 26, w: 66, R: 31, W: 11 }, acquireWaitCount: { r: 5, w: 8, R: 11, W: 8 }, timeAcquiringMicros: { r: 20496, w: 45010, R: 57687, W: 75852 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 841ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.372-0400 m31100| 2015-07-09T13:55:57.371-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_7 
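
Each thread's job runs in two stages: mongos first sends the mapReduce to every shard with shardedFirstPass: true, and each shard writes its partial result to a tmp.mrs.coll6_<timestamp>_<n> collection (the per-shard command lines above, 751-901 ms each). The mapper and reducer bodies are truncated in the log, so the sketch below substitutes hypothetical bodies; only the namespace, query, finalize, and output name are taken from the log:

    // Hedged sketch of the workload's client-side call; the real
    // map_reduce_replace_nonexistent.js function bodies are not shown in the log.
    var db6 = new Mongo("localhost:30998").getDB("db6");  // workers use both mongos
    db6.coll6.mapReduce(
        function mapper() {  // body assumed beyond the logged prefix
            if (this.hasOwnProperty('key') && this.hasOwnProperty('value'))
                emit(this.key, {count: 1});
        },
        function reducer(key, values) {  // assumed aggregation
            var res = {count: 0};
            values.forEach(function(v) { res.count += v.count; });
            return res;
        },
        {
            query: {key: {$exists: true}, value: {$exists: true}},
            finalize: function finalizer(key, reducedValue) { return reducedValue; },
            out: {replace: "map_reduce_replace_nonexistent0"}
        });
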
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.374-0400 m31200| 2015-07-09T13:55:57.374-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.376-0400 m31100| 2015-07-09T13:55:57.375-0400 I SHARDING [conn59] ChunkManager: time to load chunks for db6.coll6: 0ms sequenceNumber: 2 version: 2|5||559eb5abca4787b9985d1bcb based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.377-0400 m31100| 2015-07-09T13:55:57.376-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62763 #61 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.379-0400 m31200| 2015-07-09T13:55:57.379-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62764 #42 (38 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.397-0400 m31200| 2015-07-09T13:55:57.396-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.397-0400 m31200| 2015-07-09T13:55:57.397-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.398-0400 m31200| 2015-07-09T13:55:57.397-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.399-0400 m31200| 2015-07-09T13:55:57.398-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464556_1 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.400-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.400-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.400-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.401-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_1", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:210 locks:{ Global: { acquireCount: { r: 188, w: 75, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 6284, w: 15125, W: 118 } }, Database: { acquireCount: { r: 26, w: 67, R: 29, W: 11 }, acquireWaitCount: { r: 3, w: 10, R: 11, W: 7 }, timeAcquiringMicros: { r: 30495, w: 19236, R: 49794, W: 101966 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 874ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.402-0400 m31202| 2015-07-09T13:55:57.399-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.402-0400 m31201| 2015-07-09T13:55:57.401-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.404-0400 m31100| 2015-07-09T13:55:57.403-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.406-0400 m31100| 2015-07-09T13:55:57.405-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62765 #62 (56 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.409-0400 m31200| 2015-07-09T13:55:57.409-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62766 #43 (39 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:57.417-0400 m31200| 2015-07-09T13:55:57.416-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.428-0400 m31100| 2015-07-09T13:55:57.428-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62767 #63 (57 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.429-0400 m31200| 2015-07-09T13:55:57.429-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62768 #44 (40 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.431-0400 m31200| 2015-07-09T13:55:57.429-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.431-0400 m31200| 2015-07-09T13:55:57.430-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.431-0400 m31200| 2015-07-09T13:55:57.430-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.432-0400 m31200| 2015-07-09T13:55:57.431-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464556_0 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.432-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.432-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.432-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.432-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464556_0", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:210 locks:{ Global: { acquireCount: { r: 188, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 31731, w: 16413, W: 15499 } }, Database: { acquireCount: { r: 26, w: 67, R: 29, W: 11 }, acquireWaitCount: { r: 3, w: 12, R: 9, W: 5 }, timeAcquiringMicros: { r: 23055, w: 67105, R: 69244, W: 18569 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 901ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.433-0400 m31201| 2015-07-09T13:55:57.432-0400 I COMMAND [repl writer worker 11] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.433-0400 m31100| 2015-07-09T13:55:57.433-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.434-0400 m31202| 2015-07-09T13:55:57.434-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.452-0400 m31100| 2015-07-09T13:55:57.452-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62769 #64 (58 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.454-0400 m31200| 2015-07-09T13:55:57.454-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62770 #45 (41 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.480-0400 m31100| 2015-07-09T13:55:57.479-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62771 #65 (59 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.482-0400 m31200| 2015-07-09T13:55:57.481-0400 I NETWORK 
[initandlisten] connection accepted from 127.0.0.1:62772 #46 (42 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.539-0400 m31100| 2015-07-09T13:55:57.538-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.544-0400 m31100| 2015-07-09T13:55:57.544-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.545-0400 m31100| 2015-07-09T13:55:57.545-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.549-0400 m31100| 2015-07-09T13:55:57.548-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.551-0400 m31100| 2015-07-09T13:55:57.550-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.551-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.552-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.552-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.552-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.552-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.555-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464556_0", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464556_0", timeMillis: 749, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|60, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464556_0", timeMillis: 819, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|42, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 242 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 3, W: 5 }, timeAcquiringMicros: { w: 46254, W: 19451 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 192ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.555-0400 m31100| 2015-07-09T13:55:57.551-0400 I COMMAND [conn37] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.555-0400 m31200| 2015-07-09T13:55:57.553-0400 I COMMAND [conn18] CMD: drop db6.tmp.mrs.coll6_1436464556_0 
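The conn52 `mapreduce.shardedfinish` record above merges the two per-shard temp collections into db6.map_reduce_replace_nonexistent0. Its top-level `counts` are simply the per-shard sums reported in `shardCounts`, which is a quick consistency check when reading these records:

    // Totals in "counts" = test-rs0 + test-rs1 from "shardCounts":
    // input:   970 + 1030 = 2000
    // emit:    970 + 1030 = 2000
    // reduce:   80 +   80 =  160
    // output:   20 +   20 =   40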
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.556-0400 m31100| 2015-07-09T13:55:57.554-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.557-0400 m30999| 2015-07-09T13:55:57.556-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.557-0400 m30999| 2015-07-09T13:55:57.556-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.557-0400 m31100| 2015-07-09T13:55:57.556-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.558-0400 m31201| 2015-07-09T13:55:57.557-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.561-0400 m31202| 2015-07-09T13:55:57.561-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.562-0400 m31100| 2015-07-09T13:55:57.562-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.563-0400 m31100| 2015-07-09T13:55:57.562-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.565-0400 m31101| 2015-07-09T13:55:57.564-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.565-0400 m31102| 2015-07-09T13:55:57.565-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.566-0400 m31100| 2015-07-09T13:55:57.566-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.566-0400 m31100| 2015-07-09T13:55:57.566-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.570-0400 m31100| 2015-07-09T13:55:57.569-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.570-0400 m31100| 2015-07-09T13:55:57.569-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.570-0400 m31100| 2015-07-09T13:55:57.570-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.578-0400 m31100| 2015-07-09T13:55:57.578-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.578-0400 m31100| 2015-07-09T13:55:57.578-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.579-0400 m31100| 2015-07-09T13:55:57.578-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.579-0400 m31100| 2015-07-09T13:55:57.578-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.580-0400 m31100| 2015-07-09T13:55:57.579-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.580-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:57.583-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.583-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.583-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.583-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.585-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464556_0", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464556_0", timeMillis: 774, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|89, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464556_0", timeMillis: 899, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|107, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 5162, W: 3951 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 4, W: 3 }, timeAcquiringMicros: { w: 7141, W: 17642 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.586-0400 m31100| 2015-07-09T13:55:57.579-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.586-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.586-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.586-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.586-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.586-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.588-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464556_1", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464556_1", timeMillis: 806, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|107, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: 
"tmp.mrs.coll6_1436464556_1", timeMillis: 872, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|102, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 18124, W: 810 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 2, W: 3 }, timeAcquiringMicros: { w: 2211, W: 27359 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.589-0400 m31200| 2015-07-09T13:55:57.580-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.590-0400 m31100| 2015-07-09T13:55:57.579-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.590-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.590-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.590-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.590-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.590-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.592-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464556_1", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464556_1", timeMillis: 763, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|67, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464556_1", timeMillis: 828, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|39, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 21789, W: 2896 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 4, W: 2 }, timeAcquiringMicros: { w: 
53650, W: 656 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 227ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.592-0400 m31100| 2015-07-09T13:55:57.579-0400 I COMMAND [conn35] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.592-0400 m31100| 2015-07-09T13:55:57.580-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.592-0400 m31100| 2015-07-09T13:55:57.580-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.593-0400 m31200| 2015-07-09T13:55:57.580-0400 I COMMAND [conn34] CMD: drop db6.tmp.mrs.coll6_1436464556_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.593-0400 m31100| 2015-07-09T13:55:57.581-0400 I COMMAND [conn37] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.593-0400 m31101| 2015-07-09T13:55:57.581-0400 I COMMAND [repl writer worker 15] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.593-0400 m31102| 2015-07-09T13:55:57.583-0400 I COMMAND [repl writer worker 1] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.606-0400 m30998| 2015-07-09T13:55:57.606-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.606-0400 m30998| 2015-07-09T13:55:57.606-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.606-0400 m31100| 2015-07-09T13:55:57.606-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.625-0400 m31200| 2015-07-09T13:55:57.624-0400 I COMMAND [conn18] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.625-0400 m31200| 2015-07-09T13:55:57.624-0400 I COMMAND [conn34] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.625-0400 m31100| 2015-07-09T13:55:57.625-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.630-0400 m31100| 2015-07-09T13:55:57.630-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.631-0400 m31100| 2015-07-09T13:55:57.630-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.632-0400 m31100| 2015-07-09T13:55:57.630-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.632-0400 m31100| 2015-07-09T13:55:57.631-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.632-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.633-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.633-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.633-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.633-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.635-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464556_2", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464556_2", timeMillis: 770, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|74, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464556_2", timeMillis: 840, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464557000|86, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 28455, W: 7065 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 6, W: 3 }, timeAcquiringMicros: { w: 66000, W: 33694 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 260ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.635-0400 m31101| 2015-07-09T13:55:57.632-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.635-0400 m31102| 2015-07-09T13:55:57.632-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.636-0400 m31100| 2015-07-09T13:55:57.632-0400 I COMMAND [conn37] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.637-0400 m31101| 2015-07-09T13:55:57.635-0400 I COMMAND [repl writer worker 5] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.639-0400 m31102| 2015-07-09T13:55:57.637-0400 I COMMAND [repl writer worker 5] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.639-0400 m30999| 2015-07-09T13:55:57.639-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.640-0400 m30999| 2015-07-09T13:55:57.639-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.640-0400 m31100| 2015-07-09T13:55:57.640-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.641-0400 m30998| 2015-07-09T13:55:57.641-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.641-0400 m30998| 2015-07-09T13:55:57.641-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.641-0400 m31100| 
2015-07-09T13:55:57.641-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.646-0400 m31200| 2015-07-09T13:55:57.646-0400 I COMMAND [conn18] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.662-0400 m31200| 2015-07-09T13:55:57.661-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.662-0400 m30999| 2015-07-09T13:55:57.662-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.662-0400 m30999| 2015-07-09T13:55:57.662-0400 I COMMAND [conn44] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.663-0400 m31100| 2015-07-09T13:55:57.662-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.663-0400 m31202| 2015-07-09T13:55:57.663-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.671-0400 m31101| 2015-07-09T13:55:57.670-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.674-0400 m31100| 2015-07-09T13:55:57.672-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.674-0400 m31100| 2015-07-09T13:55:57.672-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.674-0400 m31102| 2015-07-09T13:55:57.674-0400 I COMMAND [repl writer worker 11] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.677-0400 m31101| 2015-07-09T13:55:57.677-0400 I COMMAND [repl writer worker 12] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.680-0400 m31202| 2015-07-09T13:55:57.680-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.681-0400 m31102| 2015-07-09T13:55:57.681-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.681-0400 m31201| 2015-07-09T13:55:57.681-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464556_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.682-0400 m31100| 2015-07-09T13:55:57.682-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.684-0400 m31102| 2015-07-09T13:55:57.684-0400 I COMMAND [repl writer worker 4] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.685-0400 m31100| 2015-07-09T13:55:57.684-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.685-0400 m31101| 2015-07-09T13:55:57.684-0400 I COMMAND [repl writer worker 9] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.686-0400 m31201| 2015-07-09T13:55:57.686-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464556_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.688-0400 m31102| 2015-07-09T13:55:57.688-0400 I COMMAND [repl writer worker 6] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:57.689-0400 m31101| 2015-07-09T13:55:57.689-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.690-0400 m31200| 2015-07-09T13:55:57.690-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.691-0400 m31200| 2015-07-09T13:55:57.691-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:57.696-0400 m31200| 2015-07-09T13:55:57.696-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.160-0400 m31200| 2015-07-09T13:55:58.160-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.163-0400 m31200| 2015-07-09T13:55:58.163-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.164-0400 m31200| 2015-07-09T13:55:58.163-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.165-0400 m31200| 2015-07-09T13:55:58.164-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.169-0400 m31200| 2015-07-09T13:55:58.168-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464557_3 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.169-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.169-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.169-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.170-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:210 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 714 } }, Database: { acquireCount: { r: 25, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 19, R: 11, W: 2 }, timeAcquiringMicros: { w: 190956, R: 134388, W: 3816 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 588ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.170-0400 m31200| 2015-07-09T13:55:58.169-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464557_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.179-0400 m31100| 2015-07-09T13:55:58.179-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.183-0400 m31200| 2015-07-09T13:55:58.183-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.184-0400 m31200| 2015-07-09T13:55:58.183-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.186-0400 m31100| 2015-07-09T13:55:58.185-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.186-0400 m31100| 2015-07-09T13:55:58.186-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.188-0400 m31200| 2015-07-09T13:55:58.188-0400 I COMMAND 
[conn36] CMD: drop db6.tmp.mr.coll6_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.190-0400 m31200| 2015-07-09T13:55:58.189-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464557_2 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.190-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.190-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.190-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.191-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:210 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3851, W: 444 } }, Database: { acquireCount: { r: 25, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 3, w: 8, R: 15, W: 8 }, timeAcquiringMicros: { r: 12771, w: 34180, R: 115673, W: 15549 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 541ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.197-0400 m31100| 2015-07-09T13:55:58.196-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.198-0400 m31200| 2015-07-09T13:55:58.197-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464557_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.200-0400 m31100| 2015-07-09T13:55:58.200-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464557_3 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.200-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.201-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.201-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.202-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:210 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 5532, W: 62 } }, Database: { acquireCount: { r: 25, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 17, R: 11, W: 4 }, timeAcquiringMicros: { w: 193117, R: 141295, W: 10890 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 620ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.203-0400 m31100| 2015-07-09T13:55:58.201-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.203-0400 m31100| 2015-07-09T13:55:58.203-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464557_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.206-0400 m31200| 2015-07-09T13:55:58.205-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.206-0400 m31200| 2015-07-09T13:55:58.206-0400 I COMMAND 
[conn40] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.207-0400 m31100| 2015-07-09T13:55:58.207-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.207-0400 m31100| 2015-07-09T13:55:58.207-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.208-0400 m31200| 2015-07-09T13:55:58.208-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.209-0400 m31200| 2015-07-09T13:55:58.208-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.211-0400 m31100| 2015-07-09T13:55:58.210-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.237-0400 m31200| 2015-07-09T13:55:58.236-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.237-0400 m31200| 2015-07-09T13:55:58.237-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.238-0400 m31200| 2015-07-09T13:55:58.237-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464557_5 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.238-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.238-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.238-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.239-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:210 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 20129, w: 28525, W: 441 } }, Database: { acquireCount: { r: 25, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 4, w: 5, R: 15, W: 8 }, timeAcquiringMicros: { r: 15347, w: 24151, R: 53343, W: 79648 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 559ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.239-0400 m31200| 2015-07-09T13:55:58.238-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.240-0400 m31200| 2015-07-09T13:55:58.239-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464557_3 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.240-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.241-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.241-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.241-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:210 locks:{ Global: { acquireCount: { r: 160, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, 
timeAcquiringMicros: { r: 13093, w: 14391, W: 68 } }, Database: { acquireCount: { r: 25, w: 67, R: 16, W: 11 }, acquireWaitCount: { r: 6, w: 5, R: 10, W: 9 }, timeAcquiringMicros: { r: 25789, w: 89976, R: 34897, W: 57965 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 565ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.241-0400 m31201| 2015-07-09T13:55:58.240-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.242-0400 m31200| 2015-07-09T13:55:58.241-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464557_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.242-0400 m31202| 2015-07-09T13:55:58.241-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.247-0400 m31100| 2015-07-09T13:55:58.246-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464557_4 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.247-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.248-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.248-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.248-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:210 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 6489, W: 265 } }, Database: { acquireCount: { r: 25, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 5, w: 8, R: 13, W: 9 }, timeAcquiringMicros: { r: 9630, w: 35292, R: 92099, W: 106238 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 578ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.249-0400 m31200| 2015-07-09T13:55:58.249-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.250-0400 m31200| 2015-07-09T13:55:58.249-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.251-0400 m31200| 2015-07-09T13:55:58.251-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.251-0400 m31200| 2015-07-09T13:55:58.251-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464557_4 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.252-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.252-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.253-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.253-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:210 locks:{ Global: { acquireCount: { r: 
163, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 8779, w: 48889 } }, Database: { acquireCount: { r: 25, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 4, w: 9, R: 15, W: 5 }, timeAcquiringMicros: { r: 2379, w: 52119, R: 85940, W: 51867 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 582ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.254-0400 m31100| 2015-07-09T13:55:58.254-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.306-0400 m31100| 2015-07-09T13:55:58.305-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464557_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.312-0400 m31100| 2015-07-09T13:55:58.311-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.312-0400 m31100| 2015-07-09T13:55:58.312-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.314-0400 m31100| 2015-07-09T13:55:58.314-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.316-0400 m31100| 2015-07-09T13:55:58.316-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464557_5 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.316-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.316-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.316-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.318-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:210 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 6145, W: 3105 } }, Database: { acquireCount: { r: 25, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 6, w: 12, R: 13, W: 9 }, timeAcquiringMicros: { r: 38135, w: 95134, R: 46505, W: 105870 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 634ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.318-0400 m31100| 2015-07-09T13:55:58.317-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.333-0400 m31100| 2015-07-09T13:55:58.333-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.337-0400 m31100| 2015-07-09T13:55:58.337-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.338-0400 m31100| 2015-07-09T13:55:58.338-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.339-0400 m31100| 2015-07-09T13:55:58.339-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.340-0400 m31100| 2015-07-09T13:55:58.340-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464557_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.348-0400 m31100| 2015-07-09T13:55:58.348-0400 I 
COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.348-0400 m31100| 2015-07-09T13:55:58.348-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.349-0400 m31100| 2015-07-09T13:55:58.348-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.352-0400 m31100| 2015-07-09T13:55:58.351-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464557_3 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.352-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.352-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.352-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.355-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_3", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:210 locks:{ Global: { acquireCount: { r: 168, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 8332, w: 13478, W: 460 } }, Database: { acquireCount: { r: 25, w: 67, R: 20, W: 11 }, acquireWaitCount: { r: 5, w: 16, R: 15, W: 9 }, timeAcquiringMicros: { r: 38818, w: 111849, R: 73937, W: 76356 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 677ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.355-0400 m31100| 2015-07-09T13:55:58.352-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464557_2 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.355-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.356-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.356-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.357-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464557_2", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:210 locks:{ Global: { acquireCount: { r: 169, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 18112, w: 4408, W: 259 } }, Database: { acquireCount: { r: 25, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 7, w: 18, R: 15, W: 7 }, timeAcquiringMicros: { r: 35121, w: 133334, R: 80911, W: 53436 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 705ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.357-0400 m31100| 2015-07-09T13:55:58.354-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.357-0400 m31100| 2015-07-09T13:55:58.355-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.363-0400 m31102| 2015-07-09T13:55:58.363-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:58.363-0400 m31101| 2015-07-09T13:55:58.363-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.397-0400 m31100| 2015-07-09T13:55:58.396-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.404-0400 m31100| 2015-07-09T13:55:58.404-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.405-0400 m31100| 2015-07-09T13:55:58.404-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.406-0400 m31100| 2015-07-09T13:55:58.404-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.406-0400 m31100| 2015-07-09T13:55:58.405-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.406-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.407-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.408-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.408-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.408-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.412-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464557_3", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464557_3", timeMillis: 606, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|36, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464557_3", timeMillis: 584, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|62, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 23918, W: 2757 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 11, W: 5 }, timeAcquiringMicros: { w: 87007, W: 4915 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 204ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.413-0400 m31100| 2015-07-09T13:55:58.405-0400 I COMMAND [conn37] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.413-0400 m31200| 
2015-07-09T13:55:58.409-0400 I COMMAND [conn18] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.414-0400 m30999| 2015-07-09T13:55:58.411-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.415-0400 m30999| 2015-07-09T13:55:58.411-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.415-0400 m31100| 2015-07-09T13:55:58.411-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.415-0400 m31202| 2015-07-09T13:55:58.413-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.419-0400 m31100| 2015-07-09T13:55:58.419-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.421-0400 m31200| 2015-07-09T13:55:58.420-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.421-0400 m31100| 2015-07-09T13:55:58.421-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.425-0400 m31100| 2015-07-09T13:55:58.425-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.426-0400 m31100| 2015-07-09T13:55:58.425-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.426-0400 m31100| 2015-07-09T13:55:58.425-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.430-0400 m31100| 2015-07-09T13:55:58.428-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.430-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.430-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.431-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.431-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.431-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.433-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464557_4", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464557_4", timeMillis: 538, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|42, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464557_4", timeMillis: 580, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|106, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, 
test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 31957, W: 43 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 8, W: 4 }, timeAcquiringMicros: { w: 49866, W: 4159 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.433-0400 m31100| 2015-07-09T13:55:58.430-0400 I COMMAND [conn37] CMD: drop db6.tmp.mrs.coll6_1436464557_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.443-0400 m31102| 2015-07-09T13:55:58.442-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.451-0400 m31200| 2015-07-09T13:55:58.451-0400 I COMMAND [conn18] CMD: drop db6.tmp.mrs.coll6_1436464557_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.456-0400 m31102| 2015-07-09T13:55:58.455-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.463-0400 m30999| 2015-07-09T13:55:58.463-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.463-0400 m30999| 2015-07-09T13:55:58.463-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.463-0400 m31100| 2015-07-09T13:55:58.463-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.467-0400 m31101| 2015-07-09T13:55:58.467-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.471-0400 m31201| 2015-07-09T13:55:58.471-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464557_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.481-0400 m31102| 2015-07-09T13:55:58.481-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464557_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.481-0400 m31200| 2015-07-09T13:55:58.481-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.488-0400 m31101| 2015-07-09T13:55:58.488-0400 I COMMAND [repl writer worker 7] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.494-0400 m31100| 2015-07-09T13:55:58.494-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_21 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.506-0400 m31100| 2015-07-09T13:55:58.505-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.506-0400 m31201| 2015-07-09T13:55:58.506-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464557_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.508-0400 m31100| 2015-07-09T13:55:58.508-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.509-0400 m31100| 2015-07-09T13:55:58.508-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_17 
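The paired mongos records just above ("DROP: db6.map_reduce_replace_nonexistent2" followed by "drop going to do passthrough", from m30999 and m30998) show the teardown path: the mapReduce output collections are unsharded, so mongos passes each drop through to the primary shard's primary (m31100), and the "repl writer worker" lines on m31101/m31102 are the secondaries replaying that drop from the oplog. The client side is just an ordinary drop issued against a mongos; a sketch, assuming the FSM workload tears down its output collection this way between iterations:

    // Assumed to run against one of the mongos processes (m30999 or m30998).
    db.getSiblingDB('db6').map_reduce_replace_nonexistent2.drop();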
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.509-0400 m31100| 2015-07-09T13:55:58.508-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.509-0400 m31100| 2015-07-09T13:55:58.509-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.509-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.509-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.509-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.510-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.510-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.511-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464557_5", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464557_5", timeMillis: 630, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|103, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464557_5", timeMillis: 527, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|90, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 24622, W: 5275 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 12, W: 3 }, timeAcquiringMicros: { w: 93015, W: 2482 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 192ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.511-0400 m31101| 2015-07-09T13:55:58.511-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464557_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.515-0400 m31100| 2015-07-09T13:55:58.515-0400 I COMMAND [conn37] CMD: drop db6.tmp.mrs.coll6_1436464557_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.522-0400 m31200| 2015-07-09T13:55:58.522-0400 I COMMAND [conn18] CMD: drop db6.tmp.mrs.coll6_1436464557_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.523-0400 m31102| 2015-07-09T13:55:58.522-0400 I COMMAND [repl writer worker 3] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.533-0400 m31202| 2015-07-09T13:55:58.531-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464557_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.541-0400 m31101| 2015-07-09T13:55:58.541-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.554-0400 m30999| 2015-07-09T13:55:58.553-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.554-0400 m30999| 2015-07-09T13:55:58.553-0400 I COMMAND [conn44] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.554-0400 m31100| 2015-07-09T13:55:58.553-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.554-0400 m31100| 2015-07-09T13:55:58.553-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.557-0400 m31201| 2015-07-09T13:55:58.557-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464557_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.558-0400 m31100| 2015-07-09T13:55:58.558-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_19
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.561-0400 m31100| 2015-07-09T13:55:58.561-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_19
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.562-0400 m31102| 2015-07-09T13:55:58.561-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464557_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.570-0400 m31100| 2015-07-09T13:55:58.569-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_19
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.572-0400 m31200| 2015-07-09T13:55:58.571-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_12
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.576-0400 m31101| 2015-07-09T13:55:58.576-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464557_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.576-0400 m31202| 2015-07-09T13:55:58.576-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464557_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.584-0400 m31102| 2015-07-09T13:55:58.583-0400 I COMMAND [repl writer worker 3] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.592-0400 m31101| 2015-07-09T13:55:58.592-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.594-0400 m31100| 2015-07-09T13:55:58.592-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.595-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.595-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.595-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.595-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.596-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.597-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464557_2", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464557_2", timeMillis: 701, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|137, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464557_2", timeMillis: 535, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|68, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 21447, W: 33466 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 11, W: 4 }, timeAcquiringMicros: { w: 61077, W: 42012 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 239ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.597-0400 m31100| 2015-07-09T13:55:58.593-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.598-0400 m31100| 2015-07-09T13:55:58.593-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464557_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.608-0400 m31100| 2015-07-09T13:55:58.607-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.614-0400 m31100| 2015-07-09T13:55:58.611-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.614-0400 m31100| 2015-07-09T13:55:58.611-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.637-0400 m31100| 2015-07-09T13:55:58.636-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.642-0400 m31200| 2015-07-09T13:55:58.640-0400 I COMMAND [conn34] CMD: drop db6.tmp.mrs.coll6_1436464557_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.642-0400 m31101| 2015-07-09T13:55:58.641-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464557_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.642-0400 m31100| 2015-07-09T13:55:58.641-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.643-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.643-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.643-0400 m31102| 2015-07-09T13:55:58.642-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464557_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.643-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.643-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.644-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.645-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464557_3", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464557_3", timeMillis: 663, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|132, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464557_3", timeMillis: 562, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|93, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 35521, W: 14372 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 15, W: 2 }, timeAcquiringMicros: { w: 146993, W: 29682 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 288ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.646-0400 m31100| 2015-07-09T13:55:58.642-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464557_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.654-0400 m31200| 2015-07-09T13:55:58.653-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62773 #47 (43 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.655-0400 m31200| 2015-07-09T13:55:58.654-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464557_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.659-0400 m30998| 2015-07-09T13:55:58.658-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.659-0400 m30998| 2015-07-09T13:55:58.659-0400 I COMMAND [conn42] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.660-0400 m31100| 2015-07-09T13:55:58.659-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.660-0400 m31201| 2015-07-09T13:55:58.660-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464557_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.661-0400 m31202| 2015-07-09T13:55:58.660-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464557_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.668-0400 m30998| 2015-07-09T13:55:58.668-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.668-0400 m30998| 2015-07-09T13:55:58.668-0400 I COMMAND [conn43] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.668-0400 m31100| 2015-07-09T13:55:58.668-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.688-0400 m31101| 2015-07-09T13:55:58.687-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.689-0400 m31102| 2015-07-09T13:55:58.688-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.692-0400 m31200| 2015-07-09T13:55:58.690-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.692-0400 m31100| 2015-07-09T13:55:58.692-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.692-0400 m31100| 2015-07-09T13:55:58.692-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.698-0400 m31200| 2015-07-09T13:55:58.696-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.702-0400 m31101| 2015-07-09T13:55:58.701-0400 I COMMAND [repl writer worker 7] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.704-0400 m31102| 2015-07-09T13:55:58.703-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.885-0400 m31100| 2015-07-09T13:55:58.885-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.890-0400 m31100| 2015-07-09T13:55:58.890-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.890-0400 m31100| 2015-07-09T13:55:58.890-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.891-0400 m31100| 2015-07-09T13:55:58.891-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.901-0400 m31100| 2015-07-09T13:55:58.900-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464558_6 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.901-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.902-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.902-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.903-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 23343, w: 4226, W: 35 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 21, R: 9, W: 5 }, timeAcquiringMicros: { w: 232394, R: 44815, W: 14074 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 482ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.923-0400 m31200| 2015-07-09T13:55:58.923-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.928-0400 m31200| 2015-07-09T13:55:58.928-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.929-0400 m31200| 2015-07-09T13:55:58.928-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.931-0400 m31200| 2015-07-09T13:55:58.931-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.932-0400 m31200| 2015-07-09T13:55:58.931-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.939-0400 m31200| 2015-07-09T13:55:58.938-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_10
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.939-0400 m31200| 2015-07-09T13:55:58.939-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_10
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.941-0400 m31200| 2015-07-09T13:55:58.940-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464558_7 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.941-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.941-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.941-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.941-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:210 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7701, W: 2096 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 28, R: 10, W: 5 }, timeAcquiringMicros: { w: 165178, R: 71799, W: 8260 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 459ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.944-0400 m31200| 2015-07-09T13:55:58.944-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_10
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.950-0400 m31100| 2015-07-09T13:55:58.950-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.954-0400 m31200| 2015-07-09T13:55:58.954-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464558_6 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.955-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.955-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.955-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.956-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5165, W: 1340 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 33, R: 8, W: 4 }, timeAcquiringMicros: { r: 478, w: 271069, R: 51653, W: 11278 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 536ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.956-0400 m31100| 2015-07-09T13:55:58.955-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.956-0400 m31100| 2015-07-09T13:55:58.956-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.956-0400 m31100| 2015-07-09T13:55:58.956-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.957-0400 m31100| 2015-07-09T13:55:58.957-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.961-0400 m31200| 2015-07-09T13:55:58.960-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.966-0400 m31200| 2015-07-09T13:55:58.966-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_12
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.966-0400 m31200| 2015-07-09T13:55:58.966-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_12
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.970-0400 m31200| 2015-07-09T13:55:58.969-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_12
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.973-0400 m31200| 2015-07-09T13:55:58.973-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464558_8 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.973-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.974-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.974-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.974-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_8", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 13042, W: 818 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 20, R: 11, W: 6 }, timeAcquiringMicros: { r: 9544, w: 99088, R: 55271, W: 18843 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 402ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.978-0400 m31200| 2015-07-09T13:55:58.977-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.982-0400 m31100| 2015-07-09T13:55:58.982-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464558_7 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.982-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.983-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.983-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.983-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:210 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 10373, w: 11952, W: 40 } }, Database: { acquireCount: { r: 25, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 9, R: 14, W: 6 }, timeAcquiringMicros: { r: 3203, w: 82192, R: 109154, W: 48427 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 501ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.985-0400 m31200| 2015-07-09T13:55:58.984-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.985-0400 m31200| 2015-07-09T13:55:58.984-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.986-0400 m31200| 2015-07-09T13:55:58.986-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.987-0400 m31200| 2015-07-09T13:55:58.987-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464558_4 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.987-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.987-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.987-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.989-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 5818, w: 13493, W: 511 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 13, R: 12, W: 7 }, timeAcquiringMicros: { r: 2114, w: 33500, R: 47013, W: 35573 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 298ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.989-0400 m31100| 2015-07-09T13:55:58.989-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_26
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.989-0400 m31200| 2015-07-09T13:55:58.989-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464558_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.996-0400 m31200| 2015-07-09T13:55:58.995-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:58.997-0400 m31200| 2015-07-09T13:55:58.995-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.001-0400 m31200| 2015-07-09T13:55:59.000-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_14
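Each mapReduce entry above with shardedFirstPass: true is the per-shard first phase: mongos asks every shard to run the job locally and spill its partial output into a temporary collection named tmp.mrs.<collection>_<epochSeconds>_<jobNumber>; the mapreduce.shardedfinish entries then merge those, and the cascade of CMD: drop lines (echoed by the repl writer workers on the secondaries m31101, m31102, m31201, m31202 as the drops replicate) cleans the temporaries up. For reference, the internal first-pass command has the shape below, reproduced from the logged command document; it is issued by mongos itself, not by the test code, and shardedFirstPass is an internal flag rather than a public API.

    // Shape of the logged internal first-pass command (shown via the shell
    // for illustration only; mongos normally sends this to each shard).
    db.getSiblingDB('db6').runCommand({
        mapreduce: 'coll6',
        map: mapper,      // same workload functions as in the sketch above
        reduce: reducer,
        query: { key: { $exists: true }, value: { $exists: true } },
        out: 'tmp.mrs.coll6_1436464558_6', // temp collection named by mongos
        shardedFirstPass: true             // internal flag from the log
    });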
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.002-0400 m31200| 2015-07-09T13:55:59.000-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464558_5 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.002-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.003-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.003-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.004-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 14393, w: 12029 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 5, w: 14, R: 12, W: 5 }, timeAcquiringMicros: { r: 6851, w: 34090, R: 24522, W: 48819 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 308ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.033-0400 m31100| 2015-07-09T13:55:59.033-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.037-0400 m31100| 2015-07-09T13:55:59.037-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.038-0400 m31100| 2015-07-09T13:55:59.038-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.042-0400 m31100| 2015-07-09T13:55:59.041-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.044-0400 m31100| 2015-07-09T13:55:59.044-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464558_8 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.045-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.045-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.045-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.045-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_8", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:210 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 6458, w: 18000 } }, Database: { acquireCount: { r: 25, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 5, w: 7, R: 16, W: 6 }, timeAcquiringMicros: { r: 70573, w: 26951, R: 91382, W: 21347 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 473ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.045-0400 m31100| 2015-07-09T13:55:59.044-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.089-0400 m31100| 2015-07-09T13:55:59.089-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.096-0400 m31100| 2015-07-09T13:55:59.095-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.096-0400 m31100| 2015-07-09T13:55:59.096-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.096-0400 m31100| 2015-07-09T13:55:59.096-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.097-0400 m31100| 2015-07-09T13:55:59.096-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.106-0400 m31100| 2015-07-09T13:55:59.106-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.107-0400 m31100| 2015-07-09T13:55:59.106-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.108-0400 m31100| 2015-07-09T13:55:59.107-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.113-0400 m31100| 2015-07-09T13:55:59.113-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_26
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.114-0400 m31100| 2015-07-09T13:55:59.113-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_26
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.114-0400 m31100| 2015-07-09T13:55:59.113-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464558_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.118-0400 m31100| 2015-07-09T13:55:59.118-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.119-0400 m31100| 2015-07-09T13:55:59.118-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.119-0400 m31100| 2015-07-09T13:55:59.119-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_26
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.119-0400 m31100| 2015-07-09T13:55:59.119-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.120-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.120-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.120-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.121-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.121-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.125-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464558_6", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464558_6", timeMillis: 472, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|258, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464558_6", timeMillis: 520, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|168, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 27982, W: 278 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 9, W: 4 }, timeAcquiringMicros: { w: 61475, W: 7892 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 164ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.125-0400 m31100| 2015-07-09T13:55:59.120-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.126-0400 m31100| 2015-07-09T13:55:59.120-0400 I COMMAND [conn37] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.127-0400 m31100| 2015-07-09T13:55:59.120-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.129-0400 m31100| 2015-07-09T13:55:59.121-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.129-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.129-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.131-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.131-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.131-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.134-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464558_7", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464558_7", timeMillis: 475, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|274, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464558_7", timeMillis: 447, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|164, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 16350, W: 10314 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 8, W: 5 }, timeAcquiringMicros: { w: 22963, W: 8999 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 136ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.134-0400 m31100| 2015-07-09T13:55:59.122-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.134-0400 m31200| 2015-07-09T13:55:59.123-0400 I COMMAND [conn18] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.134-0400 m31100| 2015-07-09T13:55:59.123-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464558_4 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.134-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.134-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.134-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.135-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_4", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:210 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 10021, w: 13223, W: 274 } }, Database: { acquireCount: { r: 25, w: 66, R: 16, W: 11 }, acquireWaitCount: { w: 10, R: 16, W: 9 }, timeAcquiringMicros: { w: 51789, R: 48236, W: 34137 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 435ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.135-0400 m31100| 2015-07-09T13:55:59.125-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464558_5 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.136-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.136-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.136-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.137-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464558_5", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:210 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 12586, W: 16978 } }, Database: { acquireCount: { r: 25, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 10, R: 14, W: 9 }, timeAcquiringMicros: { w: 54258, R: 38783, W: 67966 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 433ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.137-0400 m31200| 2015-07-09T13:55:59.126-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62774 #48 (44 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.137-0400 m30999| 2015-07-09T13:55:59.126-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.137-0400 m30999| 2015-07-09T13:55:59.126-0400 I COMMAND [conn42] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.138-0400 m31100| 2015-07-09T13:55:59.126-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.138-0400 m31100| 2015-07-09T13:55:59.127-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.138-0400 m31202| 2015-07-09T13:55:59.127-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.138-0400 m31201| 2015-07-09T13:55:59.127-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.138-0400 m31100| 2015-07-09T13:55:59.128-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.139-0400 m31200| 2015-07-09T13:55:59.129-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.140-0400 m31102| 2015-07-09T13:55:59.140-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.143-0400 m31102| 2015-07-09T13:55:59.143-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.147-0400 m31200| 2015-07-09T13:55:59.146-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.148-0400 m31202| 2015-07-09T13:55:59.148-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.149-0400 m30999| 2015-07-09T13:55:59.148-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.149-0400 m30999| 2015-07-09T13:55:59.148-0400 I COMMAND [conn43] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.149-0400 m31201| 2015-07-09T13:55:59.149-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.150-0400 m31100| 2015-07-09T13:55:59.149-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.155-0400 m31101| 2015-07-09T13:55:59.153-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464558_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.156-0400 m31100| 2015-07-09T13:55:59.155-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.157-0400 m31102| 2015-07-09T13:55:59.156-0400 I COMMAND [repl writer worker 4] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.157-0400 m31101| 2015-07-09T13:55:59.157-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464558_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.164-0400 m31101| 2015-07-09T13:55:59.164-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.192-0400 m31200| 2015-07-09T13:55:59.191-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_16
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.194-0400 m31102| 2015-07-09T13:55:59.193-0400 I COMMAND [repl writer worker 5] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.195-0400 m31100| 2015-07-09T13:55:59.195-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.200-0400 m31101| 2015-07-09T13:55:59.199-0400 I COMMAND [repl writer worker 1] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.203-0400 m31100| 2015-07-09T13:55:59.203-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.203-0400 m31100| 2015-07-09T13:55:59.203-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.206-0400 m31100| 2015-07-09T13:55:59.205-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.206-0400 m31100| 2015-07-09T13:55:59.206-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.210-0400 m31100| 2015-07-09T13:55:59.209-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.210-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.210-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.210-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.211-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.211-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.213-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464558_8", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464558_8", timeMillis: 467, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|22, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464558_8", timeMillis: 395, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|184, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 28439, W: 7135 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 7, W: 5 }, timeAcquiringMicros: { w: 53225, W: 6802 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 164ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.213-0400 m31100| 2015-07-09T13:55:59.210-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.233-0400 m31200| 2015-07-09T13:55:59.232-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.233-0400 m31101| 2015-07-09T13:55:59.233-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.237-0400 m31102| 2015-07-09T13:55:59.236-0400 I COMMAND [repl writer worker 11] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.240-0400 m30999| 2015-07-09T13:55:59.239-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.240-0400 m30999| 2015-07-09T13:55:59.239-0400 I COMMAND [conn44] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.240-0400 m31100| 2015-07-09T13:55:59.240-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.241-0400 m31201| 2015-07-09T13:55:59.240-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.241-0400 m31202| 2015-07-09T13:55:59.240-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464558_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.267-0400 m31200| 2015-07-09T13:55:59.267-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.277-0400 m31100| 2015-07-09T13:55:59.277-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.286-0400 m31101| 2015-07-09T13:55:59.285-0400 I COMMAND [repl writer worker 1] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.289-0400 m31102| 2015-07-09T13:55:59.288-0400 I COMMAND [repl writer worker 5] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.447-0400 m31100| 2015-07-09T13:55:59.447-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.454-0400 m31100| 2015-07-09T13:55:59.453-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.455-0400 m31100| 2015-07-09T13:55:59.454-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.460-0400 m31100| 2015-07-09T13:55:59.460-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.464-0400 m31200| 2015-07-09T13:55:59.463-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464559_9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.469-0400 m31200| 2015-07-09T13:55:59.467-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.469-0400 m31200| 2015-07-09T13:55:59.467-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.470-0400 m31200| 2015-07-09T13:55:59.469-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.471-0400 m31200| 2015-07-09T13:55:59.471-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464559_9 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.472-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.472-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.472-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.473-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:210 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 98 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 13, R: 10, W: 3 }, timeAcquiringMicros: { r: 15084, w: 86332, R: 28752, W: 1880 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 340ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.475-0400 m31200| 2015-07-09T13:55:59.475-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464559_10
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.476-0400 m31100| 2015-07-09T13:55:59.475-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.476-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.477-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.477-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.478-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.478-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.479-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464558_4", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464558_4", timeMillis: 419, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|112, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464558_4", timeMillis: 296, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|209, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 10984, W: 6477 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 19, W: 5 }, timeAcquiringMicros: { w: 214676, W: 26376 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 348ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.480-0400 m31100| 2015-07-09T13:55:59.477-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.480-0400 m31200| 2015-07-09T13:55:59.480-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_16
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.481-0400 m31200| 2015-07-09T13:55:59.480-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_16
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.482-0400 m31200| 2015-07-09T13:55:59.481-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.485-0400 m31200| 2015-07-09T13:55:59.484-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_16
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.488-0400 m31101| 2015-07-09T13:55:59.488-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.490-0400 m31200| 2015-07-09T13:55:59.490-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464559_10 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.491-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.491-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.491-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.491-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 4592 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 11, R: 10, W: 5 }, timeAcquiringMicros: { r: 2490, w: 55739, R: 39188, W: 21278 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 301ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.492-0400 m30998| 2015-07-09T13:55:59.490-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.492-0400 m30998| 2015-07-09T13:55:59.490-0400 I COMMAND [conn42] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.492-0400 m31100| 2015-07-09T13:55:59.490-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.492-0400 m31202| 2015-07-09T13:55:59.491-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.493-0400 m31100| 2015-07-09T13:55:59.491-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.493-0400 m31102| 2015-07-09T13:55:59.491-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.494-0400 m31201| 2015-07-09T13:55:59.494-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464558_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.495-0400 m31200| 2015-07-09T13:55:59.495-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464559_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.497-0400 m31100| 2015-07-09T13:55:59.497-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.497-0400 m31100| 2015-07-09T13:55:59.497-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.500-0400 m31100| 2015-07-09T13:55:59.500-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.501-0400 m31200| 2015-07-09T13:55:59.501-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.502-0400 m31100| 2015-07-09T13:55:59.501-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.502-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.502-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.502-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.502-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.502-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.503-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464558_5", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464558_5", timeMillis: 426, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|114, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464558_5", timeMillis: 303, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464558000|220, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7959, W: 11469 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 21, W: 5 }, timeAcquiringMicros: { w: 208648, W: 6116 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 374ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.503-0400 m31200| 2015-07-09T13:55:59.501-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.504-0400 m31100| 2015-07-09T13:55:59.502-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464558_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.505-0400 m31200| 2015-07-09T13:55:59.504-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.505-0400 m31200| 2015-07-09T13:55:59.505-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464559_11 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.505-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.505-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.506-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.506-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_11", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 8291 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 7, R: 12, W: 5 }, timeAcquiringMicros: { r: 5160, w: 24642, R: 9851, W: 62158 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 238ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.506-0400 m31200| 2015-07-09T13:55:59.506-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.508-0400 m31200| 2015-07-09T13:55:59.508-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464558_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.510-0400 m31100| 2015-07-09T13:55:59.510-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.510-0400 m31101| 2015-07-09T13:55:59.510-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.512-0400 m31101| 2015-07-09T13:55:59.511-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464558_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.517-0400 m31102| 2015-07-09T13:55:59.516-0400 I COMMAND [repl writer worker 3] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.518-0400 m31102| 2015-07-09T13:55:59.517-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464558_5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.536-0400 m30998| 2015-07-09T13:55:59.536-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication]
2015-07-09T13:55:59.536-0400 m30998| 2015-07-09T13:55:59.536-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.537-0400 m31100| 2015-07-09T13:55:59.536-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.537-0400 m31201| 2015-07-09T13:55:59.537-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464558_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.538-0400 m31202| 2015-07-09T13:55:59.537-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464558_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.546-0400 m31102| 2015-07-09T13:55:59.545-0400 I COMMAND [repl writer worker 7] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.547-0400 m31101| 2015-07-09T13:55:59.547-0400 I COMMAND [repl writer worker 15] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.557-0400 m31200| 2015-07-09T13:55:59.557-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.561-0400 m31100| 2015-07-09T13:55:59.561-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.676-0400 m31100| 2015-07-09T13:55:59.676-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464559_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.680-0400 m31100| 2015-07-09T13:55:59.678-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.680-0400 m31100| 2015-07-09T13:55:59.679-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.680-0400 m31100| 2015-07-09T13:55:59.679-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.681-0400 m31100| 2015-07-09T13:55:59.680-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464559_9 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.681-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.681-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.681-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.681-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 8336, w: 21681, W: 10673 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 28, R: 11, W: 7 }, timeAcquiringMicros: { r: 35920, w: 141234, R: 53376, W: 58016 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 548ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.681-0400 m31100| 2015-07-09T13:55:59.680-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.694-0400 m31200| 2015-07-09T13:55:59.694-0400 I COMMAND [conn33] CMD: drop 
db6.tmp.mrs.coll6_1436464559_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.700-0400 m31200| 2015-07-09T13:55:59.700-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.700-0400 m31200| 2015-07-09T13:55:59.700-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.701-0400 m31200| 2015-07-09T13:55:59.701-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.708-0400 m31200| 2015-07-09T13:55:59.708-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464559_6 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.708-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.708-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.709-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.709-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 3, R: 5, W: 3 }, timeAcquiringMicros: { w: 11458, R: 16700, W: 6527 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 202ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.729-0400 m31200| 2015-07-09T13:55:59.728-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464559_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.734-0400 m31200| 2015-07-09T13:55:59.734-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.734-0400 m31200| 2015-07-09T13:55:59.734-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.736-0400 m31200| 2015-07-09T13:55:59.736-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.737-0400 m31200| 2015-07-09T13:55:59.736-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464559_7 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.737-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.737-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.737-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.737-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 2967 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 6, R: 2, W: 3 }, timeAcquiringMicros: { w: 10477, R: 1516, W: 10843 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: 
{ acquireCount: { w: 22 } } } protocol:op_query 181ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.746-0400 m31100| 2015-07-09T13:55:59.746-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464559_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.752-0400 m31100| 2015-07-09T13:55:59.752-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.752-0400 m31100| 2015-07-09T13:55:59.752-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.754-0400 m31100| 2015-07-09T13:55:59.753-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.756-0400 m31100| 2015-07-09T13:55:59.755-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464559_10 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.756-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.756-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.757-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.758-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 19284, w: 14972, W: 69 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 35, R: 12, W: 9 }, timeAcquiringMicros: { r: 4165, w: 232817, R: 35798, W: 43300 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 567ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.758-0400 m31100| 2015-07-09T13:55:59.758-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_36 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.774-0400 m31100| 2015-07-09T13:55:59.773-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464559_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.777-0400 m31100| 2015-07-09T13:55:59.777-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.778-0400 m31100| 2015-07-09T13:55:59.777-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.778-0400 m31100| 2015-07-09T13:55:59.778-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.780-0400 m31100| 2015-07-09T13:55:59.779-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464559_11 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.780-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.780-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.780-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.781-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_11", shardedFirstPass: true } 
planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 9949, w: 7578, W: 243 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 6, w: 24, R: 12, W: 7 }, timeAcquiringMicros: { r: 34683, w: 134672, R: 48888, W: 53360 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 512ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.781-0400 m31100| 2015-07-09T13:55:59.781-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_37 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.782-0400 m31100| 2015-07-09T13:55:59.781-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.787-0400 m31100| 2015-07-09T13:55:59.787-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.787-0400 m31100| 2015-07-09T13:55:59.787-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.814-0400 m31100| 2015-07-09T13:55:59.813-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.815-0400 m31100| 2015-07-09T13:55:59.814-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.815-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.815-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.816-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.816-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.816-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.817-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464559_9", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464559_9", timeMillis: 547, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|206, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464559_9", timeMillis: 336, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|31, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, 
timeAcquiringMicros: { w: 9301, W: 750 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 12, W: 5 }, timeAcquiringMicros: { w: 43015, W: 26833 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.818-0400 m31100| 2015-07-09T13:55:59.814-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464559_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.819-0400 m31200| 2015-07-09T13:55:59.818-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464559_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.827-0400 m30999| 2015-07-09T13:55:59.826-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.827-0400 m30999| 2015-07-09T13:55:59.826-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.828-0400 m31100| 2015-07-09T13:55:59.826-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.829-0400 m31201| 2015-07-09T13:55:59.827-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464559_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.829-0400 m31202| 2015-07-09T13:55:59.828-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464559_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.831-0400 m31101| 2015-07-09T13:55:59.830-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464559_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.835-0400 m31102| 2015-07-09T13:55:59.835-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464559_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.836-0400 m31200| 2015-07-09T13:55:59.835-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.837-0400 m31100| 2015-07-09T13:55:59.836-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_38 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.838-0400 m31100| 2015-07-09T13:55:59.836-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464559_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.839-0400 m31101| 2015-07-09T13:55:59.839-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.843-0400 m31102| 2015-07-09T13:55:59.842-0400 I COMMAND [repl writer worker 7] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.846-0400 m31100| 2015-07-09T13:55:59.845-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.846-0400 m31100| 2015-07-09T13:55:59.845-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.846-0400 m31100| 2015-07-09T13:55:59.846-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.849-0400 m31100| 2015-07-09T13:55:59.848-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464559_6 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.849-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.849-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.850-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.851-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_6", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 11588, w: 10452, W: 749 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 16, R: 10, W: 9 }, timeAcquiringMicros: { r: 45903, w: 39477, R: 37919, W: 17295 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 342ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.851-0400 m31100| 2015-07-09T13:55:59.849-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.931-0400 m31100| 2015-07-09T13:55:59.931-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.935-0400 m31100| 2015-07-09T13:55:59.934-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_36 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.935-0400 m31100| 2015-07-09T13:55:59.934-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_36 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.935-0400 m31100| 2015-07-09T13:55:59.934-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_36 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.936-0400 m31100| 2015-07-09T13:55:59.935-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.936-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.936-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.936-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.936-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.936-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.938-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464559_10", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464559_10", timeMillis: 564, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|244, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464559_10", timeMillis: 292, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|48, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 
}, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 13574, W: 6102 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 16, W: 5 }, timeAcquiringMicros: { w: 99010, W: 3119 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.938-0400 m31100| 2015-07-09T13:55:59.936-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464559_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.946-0400 m31100| 2015-07-09T13:55:59.945-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.946-0400 m31200| 2015-07-09T13:55:59.945-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464559_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.948-0400 m30999| 2015-07-09T13:55:59.947-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.948-0400 m30999| 2015-07-09T13:55:59.947-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.949-0400 m31202| 2015-07-09T13:55:59.947-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464559_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.949-0400 m31100| 2015-07-09T13:55:59.947-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.949-0400 m31201| 2015-07-09T13:55:59.949-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464559_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.952-0400 m31100| 2015-07-09T13:55:59.951-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_37 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.952-0400 m31100| 2015-07-09T13:55:59.951-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_37 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.953-0400 m31101| 2015-07-09T13:55:59.953-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464559_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.955-0400 m31102| 2015-07-09T13:55:59.955-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464559_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.958-0400 m31100| 2015-07-09T13:55:59.958-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_37 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.965-0400 m31101| 2015-07-09T13:55:59.965-0400 I COMMAND [repl writer worker 10] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.966-0400 m31102| 2015-07-09T13:55:59.966-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.968-0400 m31100| 2015-07-09T13:55:59.967-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 
2015-07-09T13:55:59.968-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.968-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.968-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.968-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.968-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.969-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464559_11", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464559_11", timeMillis: 510, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|265, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464559_11", timeMillis: 235, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|70, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 19297, W: 695 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 87904, W: 15146 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 186ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.969-0400 m31100| 2015-07-09T13:55:59.967-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464559_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.969-0400 m31200| 2015-07-09T13:55:59.968-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464559_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.971-0400 m31200| 2015-07-09T13:55:59.970-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464559_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.972-0400 m31100| 2015-07-09T13:55:59.972-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_40 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.974-0400 m31102| 2015-07-09T13:55:59.973-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464559_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.976-0400 m31101| 2015-07-09T13:55:59.976-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464559_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.978-0400 m31200| 2015-07-09T13:55:59.978-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.979-0400 m31200| 2015-07-09T13:55:59.978-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_20 
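The entries around this point all come from the same repeating pattern in the map_reduce_replace_nonexistent workload: a thread drives a mapReduce through one of the mongos routers (m30998, m30999); each shard primary (m31100, m31200) runs the first pass with shardedFirstPass: true into a temporary db6.tmp.mrs.coll6_<epoch>_<n> collection; one shard then runs mapreduce.shardedfinish to combine the per-shard results into the out: { replace: "map_reduce_replace_nonexistentN" } target; finally the tmp.mr/tmp.mrs temporaries and the output collection are dropped, and each drop replicates to the secondaries (m31101/m31102, m31201/m31202). As a rough sketch, the client-side shell call behind one such cycle would look like the following. The real map and reduce bodies are truncated in the log ("this.has...", "values...."), so the function bodies here are hypothetical stand-ins; only the query, the finalize shape, and the out mode are taken from the entries above.

    // Hedged reconstruction of one workload iteration, not the test's actual code.
    // mapper/reducer bodies are hypothetical; emit shape and reduce shape match so
    // the reducer can be safely re-applied to partially reduced values.
    var res = db.coll6.mapReduce(
        function mapper() {
            // emit only documents matching the query filter recorded in the log
            if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
                emit(this.key, { count: 1 });
            }
        },
        function reducer(key, values) {
            var res = { count: 0 };
            values.forEach(function(v) { res.count += v.count; });
            return res;
        },
        {
            finalize: function finalizer(key, reducedValue) {
                return reducedValue;
            },
            query: { key: { $exists: true }, value: { $exists: true } },
            out: { replace: "map_reduce_replace_nonexistent0" }
        }
    );

Run against the sharded coll6, a single call like this accounts for one shardedFirstPass mapReduce per shard, one mapreduce.shardedfinish on the shard holding the output database, and the cluster-wide cleanup drops interleaved through these entries.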
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.980-0400 m30999| 2015-07-09T13:55:59.980-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.981-0400 m30999| 2015-07-09T13:55:59.980-0400 I COMMAND [conn44] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.981-0400 m31100| 2015-07-09T13:55:59.980-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.981-0400 m31200| 2015-07-09T13:55:59.981-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.983-0400 m31200| 2015-07-09T13:55:59.983-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.984-0400 m31200| 2015-07-09T13:55:59.983-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464559_12 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.984-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.984-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.984-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.985-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { W: 4 }, timeAcquiringMicros: { W: 1345 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 148ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.986-0400 m31201| 2015-07-09T13:55:59.986-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464559_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:55:59.995-0400 m31202| 2015-07-09T13:55:59.994-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464559_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.017-0400 m31101| 2015-07-09T13:56:00.015-0400 I COMMAND [repl writer worker 5] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.017-0400 m31102| 2015-07-09T13:56:00.016-0400 I COMMAND [repl writer worker 10] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.035-0400 m31200| 2015-07-09T13:56:00.035-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.044-0400 m31100| 2015-07-09T13:56:00.043-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464559_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.053-0400 m31100| 2015-07-09T13:56:00.051-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.054-0400 m31100| 2015-07-09T13:56:00.051-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.059-0400 m31100| 2015-07-09T13:56:00.059-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.072-0400 m31100| 2015-07-09T13:56:00.072-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.075-0400 m31100| 2015-07-09T13:56:00.074-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464559_7 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.075-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.075-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.075-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.076-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_7", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 4, W: 1 }, timeAcquiringMicros: { r: 23405, w: 18153, W: 43715 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 12, w: 33, R: 11, W: 9 }, timeAcquiringMicros: { r: 60052, w: 134681, R: 26913, W: 56642 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 519ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.076-0400 m31100| 2015-07-09T13:56:00.076-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.129-0400 m31100| 2015-07-09T13:56:00.128-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.133-0400 m31100| 2015-07-09T13:56:00.132-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.137-0400 m31100| 2015-07-09T13:56:00.133-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.137-0400 m31100| 2015-07-09T13:56:00.136-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.138-0400 m31200| 2015-07-09T13:56:00.136-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464559_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.147-0400 m31200| 2015-07-09T13:56:00.147-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.148-0400 m31200| 2015-07-09T13:56:00.148-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.155-0400 m31100| 2015-07-09T13:56:00.154-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.155-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.155-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.156-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.156-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.156-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.158-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464559_6", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464559_6", timeMillis: 339, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|316, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464559_6", timeMillis: 194, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|94, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 59612, W: 38445 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 18, W: 5 }, timeAcquiringMicros: { w: 87867, W: 37816 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 305ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.176-0400 m31100| 2015-07-09T13:56:00.176-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464559_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.181-0400 m31200| 2015-07-09T13:56:00.181-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464559_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.182-0400 m31101| 2015-07-09T13:56:00.181-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464559_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.183-0400 m31200| 2015-07-09T13:56:00.182-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.185-0400 m30998| 2015-07-09T13:56:00.185-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.185-0400 m30998| 2015-07-09T13:56:00.185-0400 I COMMAND [conn42] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.186-0400 m31201| 2015-07-09T13:56:00.186-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464559_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.186-0400 m31202| 2015-07-09T13:56:00.186-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464559_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.186-0400 m31100| 2015-07-09T13:56:00.186-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.195-0400 m31200| 2015-07-09T13:56:00.195-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464559_13 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.195-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.195-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.195-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.196-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 8526 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 4, R: 4, W: 6 }, timeAcquiringMicros: { r: 1421, w: 11923, R: 19703, W: 47665 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 224ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.197-0400 m31102| 2015-07-09T13:56:00.196-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464559_6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.199-0400 m31101| 2015-07-09T13:56:00.199-0400 I COMMAND [repl writer worker 10] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.200-0400 m31102| 2015-07-09T13:56:00.199-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.202-0400 m31200| 2015-07-09T13:56:00.202-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.208-0400 m31100| 2015-07-09T13:56:00.207-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.283-0400 m31200| 2015-07-09T13:56:00.281-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464560_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.287-0400 m31200| 2015-07-09T13:56:00.287-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.288-0400 m31200| 2015-07-09T13:56:00.288-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.291-0400 m31200| 2015-07-09T13:56:00.291-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.291-0400 m31200| 2015-07-09T13:56:00.291-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464560_14 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.291-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.291-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.291-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.293-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 2386 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 11, R: 6, W: 1 }, timeAcquiringMicros: { w: 59661, R: 3885, W: 4917 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 256ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.296-0400 m31100| 2015-07-09T13:56:00.296-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464559_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.305-0400 m31100| 2015-07-09T13:56:00.304-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.305-0400 m31100| 2015-07-09T13:56:00.305-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.306-0400 m31100| 2015-07-09T13:56:00.306-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.317-0400 m31100| 2015-07-09T13:56:00.317-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464559_13 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.317-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.317-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.317-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.318-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 2, W: 1 }, timeAcquiringMicros: { r: 32415, w: 772, W: 550 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 16, R: 9, W: 7 }, timeAcquiringMicros: { w: 89758, R: 28352, W: 19803 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 348ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.318-0400 m31100| 2015-07-09T13:56:00.318-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_44
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.322-0400 m31100| 2015-07-09T13:56:00.322-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464559_12
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.327-0400 m31100| 2015-07-09T13:56:00.327-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.328-0400 m31100| 2015-07-09T13:56:00.327-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.328-0400 m31100| 2015-07-09T13:56:00.327-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.332-0400 m31100| 2015-07-09T13:56:00.332-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.333-0400 m31100| 2015-07-09T13:56:00.332-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.333-0400 m31100| 2015-07-09T13:56:00.332-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.333-0400 m31100| 2015-07-09T13:56:00.333-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.335-0400 m31100| 2015-07-09T13:56:00.333-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.335-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.335-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.335-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.335-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.335-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.336-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464559_7", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464559_7", timeMillis: 496, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|6, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464559_7", timeMillis: 179, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|115, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 13490, W: 21258 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 20, W: 3 }, timeAcquiringMicros: { w: 130475, W: 15613 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 257ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.337-0400 m31100| 2015-07-09T13:56:00.334-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464559_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.345-0400 m31200| 2015-07-09T13:56:00.344-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464559_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.347-0400 m31102| 2015-07-09T13:56:00.347-0400 I COMMAND [repl writer worker 11] CMD: drop db6.tmp.mrs.coll6_1436464559_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.348-0400 m31101| 2015-07-09T13:56:00.347-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464559_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.350-0400 m30998| 2015-07-09T13:56:00.350-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.351-0400 m30998| 2015-07-09T13:56:00.350-0400 I COMMAND [conn43] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.351-0400 m31100| 2015-07-09T13:56:00.350-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.351-0400 m31200| 2015-07-09T13:56:00.350-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464560_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.351-0400 m31100| 2015-07-09T13:56:00.351-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464559_12 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.352-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.352-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.352-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.352-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464559_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 5, W: 1 }, timeAcquiringMicros: { r: 36593, w: 40957, W: 14399 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 26, R: 9, W: 7 }, timeAcquiringMicros: { r: 13585, w: 163211, R: 36583, W: 42741 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 516ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.353-0400 m31101| 2015-07-09T13:56:00.352-0400 I COMMAND [repl writer worker 11] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.353-0400 m31100| 2015-07-09T13:56:00.353-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_45
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.354-0400 m31102| 2015-07-09T13:56:00.353-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.357-0400 m31200| 2015-07-09T13:56:00.355-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.357-0400 m31200| 2015-07-09T13:56:00.355-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.357-0400 m31201| 2015-07-09T13:56:00.356-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464559_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.358-0400 m31200| 2015-07-09T13:56:00.356-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.358-0400 m31200| 2015-07-09T13:56:00.356-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464560_8 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.358-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.358-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.358-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.358-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_8", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:210 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, R: 6, W: 4 }, timeAcquiringMicros: { r: 3884, R: 7985, W: 3656 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 154ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.359-0400 m31200| 2015-07-09T13:56:00.359-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.359-0400 m31202| 2015-07-09T13:56:00.359-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464559_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.391-0400 m31100| 2015-07-09T13:56:00.390-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_46
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.483-0400 m31200| 2015-07-09T13:56:00.483-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464560_9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.487-0400 m31200| 2015-07-09T13:56:00.486-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.487-0400 m31200| 2015-07-09T13:56:00.487-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.489-0400 m31200| 2015-07-09T13:56:00.488-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.489-0400 m31200| 2015-07-09T13:56:00.489-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464560_9 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.489-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.489-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.490-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.490-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 130ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.517-0400 m31100| 2015-07-09T13:56:00.516-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.524-0400 m31100| 2015-07-09T13:56:00.524-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_44
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.524-0400 m31100| 2015-07-09T13:56:00.524-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_44
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.532-0400 m31100| 2015-07-09T13:56:00.531-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_44
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.533-0400 m31100| 2015-07-09T13:56:00.532-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.533-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.533-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.533-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.533-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.533-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.536-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464559_13", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464559_13", timeMillis: 336, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|77, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464559_13", timeMillis: 177, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|23, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 14260, W: 11595 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 16, W: 5 }, timeAcquiringMicros: { w: 87766, W: 19968 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 214ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.537-0400 m31100| 2015-07-09T13:56:00.533-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464559_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.537-0400 m31100| 2015-07-09T13:56:00.533-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464560_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.539-0400 m31100| 2015-07-09T13:56:00.539-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.540-0400 m31100| 2015-07-09T13:56:00.539-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.541-0400 m31200| 2015-07-09T13:56:00.540-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464559_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.543-0400 m31100| 2015-07-09T13:56:00.542-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.543-0400 m31100| 2015-07-09T13:56:00.542-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.545-0400 m31100| 2015-07-09T13:56:00.545-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_45
[js_test:fsm_all_sharded_replication]
2015-07-09T13:56:00.545-0400 m31100| 2015-07-09T13:56:00.545-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_45 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.546-0400 m31100| 2015-07-09T13:56:00.545-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464560_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.556-0400 m31100| 2015-07-09T13:56:00.556-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_41 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.557-0400 m31100| 2015-07-09T13:56:00.556-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_41 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.558-0400 m31100| 2015-07-09T13:56:00.556-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_45 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.558-0400 m31100| 2015-07-09T13:56:00.557-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_41 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.558-0400 m31100| 2015-07-09T13:56:00.557-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.559-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.559-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.559-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.559-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.559-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.560-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464559_12", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464559_12", timeMillis: 493, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|79, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464559_12", timeMillis: 143, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464559000|139, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 35505, W: 665 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 15, W: 4 }, timeAcquiringMicros: { w: 94552, W: 11638 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 204ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.561-0400 m31100| 2015-07-09T13:56:00.558-0400 I COMMAND [conn15] CMD: drop 
db6.tmp.mrs.coll6_1436464559_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.561-0400 m31102| 2015-07-09T13:56:00.558-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464559_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.561-0400 m30999| 2015-07-09T13:56:00.559-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.561-0400 m30999| 2015-07-09T13:56:00.559-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.561-0400 m31100| 2015-07-09T13:56:00.559-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.561-0400 m31202| 2015-07-09T13:56:00.559-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464559_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.561-0400 m31101| 2015-07-09T13:56:00.561-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464559_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.562-0400 m31201| 2015-07-09T13:56:00.561-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464559_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.564-0400 m31100| 2015-07-09T13:56:00.563-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464560_8 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.565-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.565-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.566-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.566-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_8", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 2, W: 1 }, timeAcquiringMicros: { r: 10680, w: 19785, W: 1080 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 17, R: 9, W: 9 }, timeAcquiringMicros: { r: 869, w: 107674, R: 37368, W: 17417 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 361ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.567-0400 m31200| 2015-07-09T13:56:00.563-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464559_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.567-0400 m31100| 2015-07-09T13:56:00.564-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464560_14 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.567-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.567-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.567-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.568-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 
writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 5, W: 1 }, timeAcquiringMicros: { r: 44055, w: 30482, W: 5328 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 26, R: 11, W: 7 }, timeAcquiringMicros: { r: 290, w: 175796, R: 23504, W: 28985 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 528ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.568-0400 m31100| 2015-07-09T13:56:00.564-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.569-0400 m31100| 2015-07-09T13:56:00.565-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.578-0400 m30999| 2015-07-09T13:56:00.577-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.578-0400 m31200| 2015-07-09T13:56:00.577-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.578-0400 m30999| 2015-07-09T13:56:00.577-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.578-0400 m31100| 2015-07-09T13:56:00.578-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.580-0400 m31201| 2015-07-09T13:56:00.580-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464559_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.580-0400 m31202| 2015-07-09T13:56:00.580-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464559_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.583-0400 m31101| 2015-07-09T13:56:00.583-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464559_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.589-0400 m31102| 2015-07-09T13:56:00.589-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464559_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.599-0400 m31101| 2015-07-09T13:56:00.599-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.601-0400 m31100| 2015-07-09T13:56:00.600-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.601-0400 m31102| 2015-07-09T13:56:00.601-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.627-0400 m31200| 2015-07-09T13:56:00.627-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.632-0400 m31101| 2015-07-09T13:56:00.631-0400 I COMMAND [repl writer worker 12] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.633-0400 m31102| 2015-07-09T13:56:00.632-0400 I COMMAND [repl writer worker 14] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.645-0400 m31100| 2015-07-09T13:56:00.645-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.759-0400 m31100| 2015-07-09T13:56:00.759-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464560_9 
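The mapReduce entries above only show fragments of the workload's functions, because mongod truncates embedded function source in its command logging ("this.has...", "values...."). For orientation, here is a minimal mongo-shell sketch of the shape of job these records reflect; the visible fragments are kept, and everything else (the emit payload, the reduce fold) is a hypothetical placeholder, not the test's actual source:

    // Sketch only: everything past the fragments visible in the log
    // ("if (this.hasOwnProperty('key') && this.has...", "var res = {};",
    // "return reducedValue;") is a hypothetical reconstruction.
    var mapper = function mapper() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) { // log shows "this.has..."
            emit(this.key, { count: 1 });                                 // hypothetical payload
        }
    };
    var reducer = function reducer(key, values) {
        var res = {};                                 // visible in the log
        values.forEach(function (v) {                 // hypothetical fold over partial results
            Object.keys(v).forEach(function (k) {
                res[k] = (res[k] || 0) + v[k];
            });
        });
        return res;
    };
    var finalizer = function finalizer(key, reducedValue) {
        return reducedValue;                          // visible in the log
    };
    db.getSiblingDB("db6").coll6.mapReduce(mapper, reducer, {
        finalize: finalizer,
        query: { key: { $exists: true }, value: { $exists: true } },
        out: { replace: "map_reduce_replace_nonexistent1" }
    });

Run through mongos against the sharded coll6, a job of this shape produces exactly the record pattern seen here: a shardedFirstPass on each shard followed by a mapreduce.shardedfinish on one of them.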
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.762-0400 m31100| 2015-07-09T13:56:00.762-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_46 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.762-0400 m31100| 2015-07-09T13:56:00.762-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_46 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.764-0400 m31100| 2015-07-09T13:56:00.763-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_46 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.767-0400 m31100| 2015-07-09T13:56:00.767-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464560_9 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.768-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.768-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.768-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.768-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_9", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:210 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 23208, W: 136 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 17, R: 10, W: 6 }, timeAcquiringMicros: { r: 27616, w: 194450, R: 17289, W: 13266 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 408ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.775-0400 m31100| 2015-07-09T13:56:00.775-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.781-0400 m31200| 2015-07-09T13:56:00.780-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.784-0400 m31200| 2015-07-09T13:56:00.784-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.784-0400 m31200| 2015-07-09T13:56:00.784-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.787-0400 m31200| 2015-07-09T13:56:00.787-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.790-0400 m31200| 2015-07-09T13:56:00.789-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464560_15 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.790-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.790-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.790-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.791-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_15", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 
4, R: 6, W: 4 }, timeAcquiringMicros: { r: 11608, w: 14639, R: 30041, W: 2123 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 223ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.804-0400 m31200| 2015-07-09T13:56:00.804-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.809-0400 m31200| 2015-07-09T13:56:00.809-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.809-0400 m31200| 2015-07-09T13:56:00.809-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.810-0400 m31200| 2015-07-09T13:56:00.810-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.810-0400 m31200| 2015-07-09T13:56:00.810-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464560_16 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.811-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.811-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.811-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.811-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 6, R: 4, W: 3 }, timeAcquiringMicros: { r: 262, w: 20781, R: 25628, W: 15256 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 207ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.868-0400 m31100| 2015-07-09T13:56:00.868-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.875-0400 m31100| 2015-07-09T13:56:00.875-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.875-0400 m31100| 2015-07-09T13:56:00.875-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.875-0400 m31100| 2015-07-09T13:56:00.875-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.881-0400 m31100| 2015-07-09T13:56:00.880-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.881-0400 m31100| 2015-07-09T13:56:00.881-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.882-0400 m31100| 2015-07-09T13:56:00.882-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.882-0400 m31100| 2015-07-09T13:56:00.882-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.888-0400 m31100| 2015-07-09T13:56:00.888-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { 
mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.889-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.889-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.889-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.890-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.890-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.892-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464560_14", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464560_14", timeMillis: 520, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|170, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464560_14", timeMillis: 252, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|46, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 23809 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 20, W: 5 }, timeAcquiringMicros: { w: 145970, W: 41242 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 322ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.892-0400 m31100| 2015-07-09T13:56:00.888-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.892-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.892-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.892-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.892-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.893-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.895-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464560_8", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464560_8", timeMillis: 337, counts: { 
input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|165, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464560_8", timeMillis: 153, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|68, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 6235, W: 16598 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 18, W: 2 }, timeAcquiringMicros: { w: 169435, W: 5794 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 323ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.895-0400 m31100| 2015-07-09T13:56:00.889-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464560_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.895-0400 m31100| 2015-07-09T13:56:00.891-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464560_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.895-0400 m31200| 2015-07-09T13:56:00.892-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464560_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.896-0400 m31200| 2015-07-09T13:56:00.892-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464560_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.896-0400 m31102| 2015-07-09T13:56:00.896-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464560_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.899-0400 m30998| 2015-07-09T13:56:00.898-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.899-0400 m30998| 2015-07-09T13:56:00.899-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.899-0400 m31201| 2015-07-09T13:56:00.899-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464560_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.899-0400 m31100| 2015-07-09T13:56:00.899-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.899-0400 m31202| 2015-07-09T13:56:00.899-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464560_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.900-0400 m31102| 2015-07-09T13:56:00.900-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464560_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.901-0400 m30999| 2015-07-09T13:56:00.901-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.901-0400 m30999| 2015-07-09T13:56:00.901-0400 I COMMAND [conn44] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.901-0400 m31101| 
2015-07-09T13:56:00.901-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464560_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.902-0400 m31100| 2015-07-09T13:56:00.901-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.902-0400 m31202| 2015-07-09T13:56:00.902-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464560_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.903-0400 m31200| 2015-07-09T13:56:00.902-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.903-0400 m31201| 2015-07-09T13:56:00.902-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464560_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.905-0400 m31100| 2015-07-09T13:56:00.905-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.906-0400 m31101| 2015-07-09T13:56:00.906-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464560_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.913-0400 m31101| 2015-07-09T13:56:00.912-0400 I COMMAND [repl writer worker 1] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.914-0400 m31101| 2015-07-09T13:56:00.914-0400 I COMMAND [repl writer worker 11] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.915-0400 m31102| 2015-07-09T13:56:00.915-0400 I COMMAND [repl writer worker 7] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.920-0400 m31200| 2015-07-09T13:56:00.920-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.921-0400 m31102| 2015-07-09T13:56:00.921-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.926-0400 m31100| 2015-07-09T13:56:00.925-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.983-0400 m31100| 2015-07-09T13:56:00.983-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.991-0400 m31100| 2015-07-09T13:56:00.991-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.991-0400 m31100| 2015-07-09T13:56:00.991-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.991-0400 m31100| 2015-07-09T13:56:00.991-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.991-0400 m31100| 2015-07-09T13:56:00.991-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.992-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.992-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.992-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.992-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.992-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.995-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464560_9", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464560_9", timeMillis: 403, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|220, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464560_9", timeMillis: 128, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|90, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 15818, W: 42196 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 14 }, timeAcquiringMicros: { w: 79073 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 223ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.995-0400 m31100| 2015-07-09T13:56:00.992-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464560_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:00.999-0400 m31200| 2015-07-09T13:56:00.999-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464560_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.000-0400 m31101| 2015-07-09T13:56:01.000-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464560_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.001-0400 m31102| 2015-07-09T13:56:01.000-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464560_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.005-0400 m30998| 2015-07-09T13:56:01.004-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.005-0400 m30998| 2015-07-09T13:56:01.004-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.005-0400 m31100| 2015-07-09T13:56:01.005-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.006-0400 m31201| 2015-07-09T13:56:01.006-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464560_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.006-0400 m31202| 2015-07-09T13:56:01.006-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464560_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.011-0400 m31101| 2015-07-09T13:56:01.011-0400 I COMMAND [repl writer worker 9] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.012-0400 
m31102| 2015-07-09T13:56:01.011-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.017-0400 m31200| 2015-07-09T13:56:01.017-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.041-0400 m31100| 2015-07-09T13:56:01.040-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.142-0400 m31100| 2015-07-09T13:56:01.142-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.145-0400 m31100| 2015-07-09T13:56:01.145-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.145-0400 m31100| 2015-07-09T13:56:01.145-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.147-0400 m31100| 2015-07-09T13:56:01.147-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.159-0400 m31200| 2015-07-09T13:56:01.158-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.164-0400 m31200| 2015-07-09T13:56:01.163-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.164-0400 m31200| 2015-07-09T13:56:01.164-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.164-0400 m31200| 2015-07-09T13:56:01.164-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.167-0400 m31100| 2015-07-09T13:56:01.165-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464560_15 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.167-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.167-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.167-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.168-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_15", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 7964, w: 21157, W: 2561 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 13, w: 25, R: 11, W: 7 }, timeAcquiringMicros: { r: 120509, w: 157204, R: 48355, W: 22249 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 600ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.169-0400 m31100| 2015-07-09T13:56:01.169-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.173-0400 m31200| 2015-07-09T13:56:01.172-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464560_10 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.173-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.173-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.173-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.173-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 25 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 5, R: 5, W: 4 }, timeAcquiringMicros: { w: 40285, R: 15508, W: 15686 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 270ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.188-0400 m31200| 2015-07-09T13:56:01.187-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.195-0400 m31200| 2015-07-09T13:56:01.192-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.196-0400 m31200| 2015-07-09T13:56:01.195-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.197-0400 m31200| 2015-07-09T13:56:01.196-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.197-0400 m31200| 2015-07-09T13:56:01.196-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464560_17 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.197-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.198-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.198-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.198-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 5375 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 8, R: 8, W: 1 }, timeAcquiringMicros: { r: 13597, w: 32981, R: 27235, W: 196 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 290ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.219-0400 m31100| 2015-07-09T13:56:01.218-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.223-0400 m31100| 2015-07-09T13:56:01.223-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.223-0400 m31100| 2015-07-09T13:56:01.223-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.224-0400 m31100| 2015-07-09T13:56:01.224-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.230-0400 m31100| 2015-07-09T13:56:01.229-0400 I COMMAND 
[conn52] command db6.tmp.mrs.coll6_1436464560_16 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.230-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.230-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.230-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.231-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 1, W: 1 }, timeAcquiringMicros: { r: 47172, w: 109, W: 88 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 14, w: 34, R: 10, W: 8 }, timeAcquiringMicros: { r: 76478, w: 234443, R: 28437, W: 19625 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 627ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.233-0400 m31200| 2015-07-09T13:56:01.229-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464561_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.235-0400 m31100| 2015-07-09T13:56:01.234-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.241-0400 m31200| 2015-07-09T13:56:01.238-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.242-0400 m31200| 2015-07-09T13:56:01.238-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.242-0400 m31200| 2015-07-09T13:56:01.239-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.243-0400 m31200| 2015-07-09T13:56:01.239-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464561_11 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.243-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.243-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.243-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.243-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_11", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1288 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 2, R: 6, W: 4 }, timeAcquiringMicros: { w: 11311, R: 5029, W: 26666 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 223ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.274-0400 m31100| 2015-07-09T13:56:01.274-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.278-0400 m31100| 2015-07-09T13:56:01.278-0400 I COMMAND 
[conn59] CMD: drop db6.tmp.mr.coll6_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.278-0400 m31100| 2015-07-09T13:56:01.278-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.280-0400 m31100| 2015-07-09T13:56:01.280-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.280-0400 m31100| 2015-07-09T13:56:01.280-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.292-0400 m31100| 2015-07-09T13:56:01.291-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.292-0400 m31100| 2015-07-09T13:56:01.291-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.292-0400 m31100| 2015-07-09T13:56:01.292-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464560_17 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.292-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.293-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.293-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.293-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 8325, w: 27395, W: 216 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 4, w: 17, R: 11, W: 8 }, timeAcquiringMicros: { r: 40995, w: 58664, R: 31307, W: 31471 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 385ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.293-0400 m31100| 2015-07-09T13:56:01.292-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.294-0400 m31100| 2015-07-09T13:56:01.293-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.294-0400 m31100| 2015-07-09T13:56:01.293-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.307-0400 m31100| 2015-07-09T13:56:01.307-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.308-0400 m31100| 2015-07-09T13:56:01.308-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.308-0400 m31100| 2015-07-09T13:56:01.308-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.309-0400 m31100| 2015-07-09T13:56:01.309-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464560_10 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.309-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.309-0400 m31100| var res 
= {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.310-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.310-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464560_10", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 3, W: 1 }, timeAcquiringMicros: { r: 46074, w: 22798, W: 1591 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 20, R: 10, W: 8 }, timeAcquiringMicros: { r: 24981, w: 117808, R: 15048, W: 3557 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 406ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.311-0400 m31100| 2015-07-09T13:56:01.310-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.319-0400 m31100| 2015-07-09T13:56:01.319-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.319-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.320-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.320-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.320-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.320-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.322-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464560_15", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464560_15", timeMillis: 579, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|31, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464560_15", timeMillis: 218, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|115, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 18876, W: 1123 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 9, W: 4 }, timeAcquiringMicros: { w: 42340, W: 9142 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 151ms 
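Read together, these records trace the two phases of a sharded mapReduce in this server version. Phase one runs on each shard (test-rs0 via m31100, test-rs1 via m31200) with shardedFirstPass: true, writing into a per-job temp collection named tmp.mrs.coll6_<epoch>_<n>; phase two is the internal mapreduce.shardedfinish command on the output shard, which merges every shard's temp results, applies finalize, and swaps the merged output in as the target named in out: { replace: ... }. The bursts of CMD: drop against db6.tmp.mr.* and db6.tmp.mrs.* are the cleanup of those intermediates, and the same drops reappearing under the "repl writer worker" threads on m31101/m31102 and m31201/m31202 are the secondaries replaying them from the oplog. The locks:{} blobs show where the time goes: in the 600ms first pass logged by conn57 just above, the Database-lock waits alone sum to 120509 + 157204 + 48355 + 22249 ≈ 348,000 µs of timeAcquiringMicros, i.e. more than half of that operation was spent queueing for locks under the concurrent FSM workers rather than doing map/reduce work.

A quick way to see the intermediates this log keeps dropping is to list them while such a job is in flight on a test cluster (hypothetical check, not part of the test itself):

    // tmp.mr.* (per-pass scratch) and tmp.mrs.* (per-shard results)
    // both share the "tmp.mr" prefix:
    db.getSiblingDB("db6").getCollectionNames().filter(function (n) {
        return n.indexOf("tmp.mr") === 0;
    });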
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.322-0400 m31100| 2015-07-09T13:56:01.319-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.332-0400 m31200| 2015-07-09T13:56:01.332-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.340-0400 m30999| 2015-07-09T13:56:01.339-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.340-0400 m30999| 2015-07-09T13:56:01.339-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.340-0400 m31100| 2015-07-09T13:56:01.340-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.341-0400 m31201| 2015-07-09T13:56:01.339-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.341-0400 m31202| 2015-07-09T13:56:01.341-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.354-0400 m31101| 2015-07-09T13:56:01.353-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.354-0400 m31102| 2015-07-09T13:56:01.353-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464560_15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.365-0400 m31200| 2015-07-09T13:56:01.364-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.367-0400 m31100| 2015-07-09T13:56:01.366-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464561_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.376-0400 m31100| 2015-07-09T13:56:01.376-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.376-0400 m31100| 2015-07-09T13:56:01.376-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.377-0400 m31101| 2015-07-09T13:56:01.377-0400 I COMMAND [repl writer worker 10] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.378-0400 m31100| 2015-07-09T13:56:01.377-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.379-0400 m31100| 2015-07-09T13:56:01.378-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464561_11 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.379-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.379-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.380-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.380-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_11", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 2099, w: 32464 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, 
acquireWaitCount: { r: 5, w: 11, R: 12, W: 8 }, timeAcquiringMicros: { r: 32985, w: 53052, R: 45836, W: 24057 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 362ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.385-0400 m31102| 2015-07-09T13:56:01.383-0400 I COMMAND [repl writer worker 11] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.387-0400 m31100| 2015-07-09T13:56:01.387-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_60 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.415-0400 m31100| 2015-07-09T13:56:01.413-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.415-0400 m31100| 2015-07-09T13:56:01.414-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.424-0400 m31100| 2015-07-09T13:56:01.422-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.424-0400 m31100| 2015-07-09T13:56:01.422-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.425-0400 m31100| 2015-07-09T13:56:01.422-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.431-0400 m31100| 2015-07-09T13:56:01.430-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.432-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.432-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.432-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.432-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.432-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.434-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464560_16", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464560_16", timeMillis: 620, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|44, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464560_16", timeMillis: 206, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464560000|136, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, 
acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 38900, W: 1467 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 7, W: 3 }, timeAcquiringMicros: { w: 72664, W: 1076 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 200ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.434-0400 m31100| 2015-07-09T13:56:01.431-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.440-0400 m31200| 2015-07-09T13:56:01.440-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.447-0400 m30999| 2015-07-09T13:56:01.446-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.447-0400 m30999| 2015-07-09T13:56:01.446-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.447-0400 m31101| 2015-07-09T13:56:01.447-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.448-0400 m31100| 2015-07-09T13:56:01.447-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.456-0400 m31102| 2015-07-09T13:56:01.451-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.456-0400 m31201| 2015-07-09T13:56:01.452-0400 I COMMAND [repl writer worker 11] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.462-0400 m31200| 2015-07-09T13:56:01.461-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.464-0400 m31202| 2015-07-09T13:56:01.463-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464560_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.474-0400 m31102| 2015-07-09T13:56:01.472-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.483-0400 m31101| 2015-07-09T13:56:01.483-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.494-0400 m31100| 2015-07-09T13:56:01.494-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_61 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.548-0400 m31100| 2015-07-09T13:56:01.548-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.551-0400 m31100| 2015-07-09T13:56:01.551-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.552-0400 m31100| 2015-07-09T13:56:01.552-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.552-0400 m31100| 2015-07-09T13:56:01.552-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.557-0400 m31100| 2015-07-09T13:56:01.557-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.558-0400 m31100| 2015-07-09T13:56:01.557-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_57 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:01.558-0400 m31200| 2015-07-09T13:56:01.557-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464561_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.558-0400 m31100| 2015-07-09T13:56:01.557-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.559-0400 m31100| 2015-07-09T13:56:01.558-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.570-0400 m31200| 2015-07-09T13:56:01.569-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.571-0400 m31200| 2015-07-09T13:56:01.571-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.580-0400 m31100| 2015-07-09T13:56:01.578-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.580-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.580-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.580-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.580-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.580-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.581-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464560_10", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464560_10", timeMillis: 388, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|112, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464560_10", timeMillis: 261, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|23, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 22908, W: 18561 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 12, W: 5 }, timeAcquiringMicros: { w: 97209, W: 19138 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 267ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.581-0400 m31100| 2015-07-09T13:56:01.580-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.582-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.582-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.582-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.582-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.582-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.583-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464560_17", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464560_17", timeMillis: 372, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|108, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464560_17", timeMillis: 289, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|44, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 32173, W: 22316 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 15, W: 3 }, timeAcquiringMicros: { w: 122960, W: 1069 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 287ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.583-0400 m31100| 2015-07-09T13:56:01.582-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.583-0400 m31200| 2015-07-09T13:56:01.583-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.584-0400 m31200| 2015-07-09T13:56:01.583-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464561_18 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.584-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.584-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.584-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.584-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_18", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 4333 } }, 
Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 9, R: 2, W: 2 }, timeAcquiringMicros: { w: 40590, R: 5667, W: 10899 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 219ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.586-0400 m31100| 2015-07-09T13:56:01.585-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.594-0400 m31200| 2015-07-09T13:56:01.593-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.596-0400 m31200| 2015-07-09T13:56:01.596-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.604-0400 m31200| 2015-07-09T13:56:01.603-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.605-0400 m31200| 2015-07-09T13:56:01.603-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.605-0400 m31200| 2015-07-09T13:56:01.605-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.608-0400 m30998| 2015-07-09T13:56:01.608-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.609-0400 m30998| 2015-07-09T13:56:01.608-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.609-0400 m31102| 2015-07-09T13:56:01.608-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.609-0400 m31100| 2015-07-09T13:56:01.608-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.609-0400 m31101| 2015-07-09T13:56:01.609-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.610-0400 m31200| 2015-07-09T13:56:01.610-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.610-0400 m30999| 2015-07-09T13:56:01.610-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.610-0400 m30999| 2015-07-09T13:56:01.610-0400 I COMMAND [conn44] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.611-0400 m31200| 2015-07-09T13:56:01.610-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464561_19 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.611-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.611-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.611-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.612-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_19", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 
2784 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 1, R: 10, W: 4 }, timeAcquiringMicros: { w: 4395, R: 5367, W: 11437 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.612-0400 m31100| 2015-07-09T13:56:01.611-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.612-0400 m31201| 2015-07-09T13:56:01.611-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.613-0400 m31101| 2015-07-09T13:56:01.612-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.614-0400 m31102| 2015-07-09T13:56:01.613-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.615-0400 m31201| 2015-07-09T13:56:01.615-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.617-0400 m31202| 2015-07-09T13:56:01.617-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464560_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.620-0400 m31202| 2015-07-09T13:56:01.619-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464560_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.620-0400 m31200| 2015-07-09T13:56:01.620-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.625-0400 m31100| 2015-07-09T13:56:01.625-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.631-0400 m31100| 2015-07-09T13:56:01.631-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_60 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.632-0400 m31100| 2015-07-09T13:56:01.631-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_60 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.633-0400 m31102| 2015-07-09T13:56:01.632-0400 I COMMAND [repl writer worker 15] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.633-0400 m31100| 2015-07-09T13:56:01.631-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_60 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.633-0400 m31102| 2015-07-09T13:56:01.633-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.634-0400 m31101| 2015-07-09T13:56:01.633-0400 I COMMAND [repl writer worker 3] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.635-0400 m31101| 2015-07-09T13:56:01.635-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.645-0400 m31100| 2015-07-09T13:56:01.643-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.645-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:01.645-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.645-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.645-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.645-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.646-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464561_11", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464561_11", timeMillis: 360, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|149, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464561_11", timeMillis: 222, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|65, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 29998, W: 6343 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 13 }, timeAcquiringMicros: { w: 106288 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 262ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.647-0400 m31100| 2015-07-09T13:56:01.643-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464561_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.647-0400 m31200| 2015-07-09T13:56:01.644-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.652-0400 m31200| 2015-07-09T13:56:01.651-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464561_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.652-0400 m31102| 2015-07-09T13:56:01.652-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464561_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.655-0400 m31101| 2015-07-09T13:56:01.654-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464561_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.670-0400 m30998| 2015-07-09T13:56:01.668-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.670-0400 m30998| 2015-07-09T13:56:01.668-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.670-0400 m31100| 2015-07-09T13:56:01.669-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.670-0400 m31202| 2015-07-09T13:56:01.670-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464561_11 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.673-0400 m31201| 2015-07-09T13:56:01.672-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464561_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.679-0400 m31100| 2015-07-09T13:56:01.679-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_62 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.683-0400 m31100| 2015-07-09T13:56:01.682-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464561_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.689-0400 m31100| 2015-07-09T13:56:01.688-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.689-0400 m31100| 2015-07-09T13:56:01.689-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.690-0400 m31101| 2015-07-09T13:56:01.689-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.691-0400 m31102| 2015-07-09T13:56:01.690-0400 I COMMAND [repl writer worker 2] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.691-0400 m31100| 2015-07-09T13:56:01.689-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.718-0400 m31100| 2015-07-09T13:56:01.718-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_63 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.721-0400 m31200| 2015-07-09T13:56:01.721-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.726-0400 m31100| 2015-07-09T13:56:01.724-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464561_18 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.726-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.726-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.727-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.727-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_18", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 14335, w: 13316, W: 7266 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 5, R: 9, W: 5 }, timeAcquiringMicros: { r: 16370, w: 24302, R: 23181, W: 34746 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 360ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.728-0400 m31100| 2015-07-09T13:56:01.724-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_64 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.767-0400 m31100| 2015-07-09T13:56:01.767-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_65 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.905-0400 m31200| 2015-07-09T13:56:01.905-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.913-0400 m31200| 2015-07-09T13:56:01.912-0400 I COMMAND [conn33] 
CMD: drop db6.tmp.mr.coll6_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.913-0400 m31200| 2015-07-09T13:56:01.912-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.915-0400 m31200| 2015-07-09T13:56:01.914-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.935-0400 m31200| 2015-07-09T13:56:01.935-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464561_12 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.936-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.936-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.936-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.936-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 448 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 6, R: 7 }, timeAcquiringMicros: { w: 53352, R: 61077 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 315ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.943-0400 m31200| 2015-07-09T13:56:01.942-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464561_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.949-0400 m31200| 2015-07-09T13:56:01.949-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.950-0400 m31200| 2015-07-09T13:56:01.950-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.952-0400 m31200| 2015-07-09T13:56:01.952-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.953-0400 m31200| 2015-07-09T13:56:01.953-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464561_20 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.953-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.953-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.953-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.954-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_20", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 8057, W: 477 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 5, R: 9, W: 6 }, timeAcquiringMicros: { r: 8359, w: 20551, R: 27377, W: 29397 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 318ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.958-0400 m31200| 2015-07-09T13:56:01.958-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464561_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.969-0400 m31200| 2015-07-09T13:56:01.969-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.970-0400 m31200| 2015-07-09T13:56:01.970-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.978-0400 m31200| 2015-07-09T13:56:01.978-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.978-0400 m31200| 2015-07-09T13:56:01.978-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464561_13 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.979-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.979-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.979-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:01.979-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 7232 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 4, R: 7, W: 3 }, timeAcquiringMicros: { r: 3292, w: 22833, R: 7686, W: 20745 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 259ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.020-0400 m31100| 2015-07-09T13:56:02.019-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.025-0400 m31100| 2015-07-09T13:56:02.025-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_65 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.026-0400 m31100| 2015-07-09T13:56:02.026-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_65 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.026-0400 m31100| 2015-07-09T13:56:02.026-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_65 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.028-0400 m31100| 2015-07-09T13:56:02.027-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.028-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.028-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.028-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.028-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.028-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.030-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { 
$exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464561_18", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464561_18", timeMillis: 325, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|246, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464561_18", timeMillis: 207, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|90, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 56, w: 49, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 69 } }, Database: { acquireCount: { r: 2, w: 44, W: 7 }, acquireWaitCount: { r: 1, w: 17, W: 5 }, timeAcquiringMicros: { r: 10398, w: 152581, W: 26008 } }, Collection: { acquireCount: { r: 2, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 300ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.030-0400 m31100| 2015-07-09T13:56:02.027-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464561_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.036-0400 m31200| 2015-07-09T13:56:02.036-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464561_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.037-0400 m31100| 2015-07-09T13:56:02.037-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.039-0400 m31102| 2015-07-09T13:56:02.039-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464561_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.040-0400 m31101| 2015-07-09T13:56:02.039-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464561_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.041-0400 m30999| 2015-07-09T13:56:02.040-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.041-0400 m30999| 2015-07-09T13:56:02.040-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.041-0400 m31100| 2015-07-09T13:56:02.041-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.041-0400 m31100| 2015-07-09T13:56:02.041-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_61 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.041-0400 m31100| 2015-07-09T13:56:02.041-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_61 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.042-0400 m31201| 2015-07-09T13:56:02.042-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464561_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.042-0400 m31202| 2015-07-09T13:56:02.042-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464561_18 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.046-0400 m31100| 2015-07-09T13:56:02.046-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_61 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.049-0400 m31200| 2015-07-09T13:56:02.049-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.051-0400 m31100| 2015-07-09T13:56:02.049-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464561_19 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.051-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.051-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.052-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.053-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_19", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 27616, w: 6224, W: 103 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 23, R: 10, W: 6 }, timeAcquiringMicros: { r: 8021, w: 252024, R: 49729, W: 19645 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 589ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.053-0400 m31102| 2015-07-09T13:56:02.051-0400 I COMMAND [repl writer worker 1] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.053-0400 m31101| 2015-07-09T13:56:02.052-0400 I COMMAND [repl writer worker 10] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.055-0400 m31100| 2015-07-09T13:56:02.054-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.055-0400 m31100| 2015-07-09T13:56:02.055-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_67 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.057-0400 m31100| 2015-07-09T13:56:02.057-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_66 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.060-0400 m31100| 2015-07-09T13:56:02.060-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_62 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.061-0400 m31100| 2015-07-09T13:56:02.060-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_62 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.062-0400 m31100| 2015-07-09T13:56:02.062-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_62 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.065-0400 m31100| 2015-07-09T13:56:02.064-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464561_12 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.065-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.066-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.066-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.067-0400 
m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_12", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 24051, w: 10021, W: 127 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 15, R: 12, W: 7 }, timeAcquiringMicros: { r: 15290, w: 79749, R: 83692, W: 3199 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 444ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.067-0400 m31100| 2015-07-09T13:56:02.065-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_68 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.169-0400 m31100| 2015-07-09T13:56:02.169-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464561_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.174-0400 m31200| 2015-07-09T13:56:02.173-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464562_21 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.174-0400 m31100| 2015-07-09T13:56:02.174-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_63 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.175-0400 m31100| 2015-07-09T13:56:02.174-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_63 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.181-0400 m31200| 2015-07-09T13:56:02.180-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.181-0400 m31200| 2015-07-09T13:56:02.181-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.181-0400 m31100| 2015-07-09T13:56:02.181-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_63 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.183-0400 m31200| 2015-07-09T13:56:02.182-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_35 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.183-0400 m31200| 2015-07-09T13:56:02.182-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464562_21 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.183-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.183-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.183-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.184-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_21", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.195-0400 m31100| 2015-07-09T13:56:02.195-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464561_20 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.196-0400 m31100| if 
(this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.196-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.196-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.197-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_20", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 12732, w: 16744, W: 891 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 10, w: 24, R: 12, W: 9 }, timeAcquiringMicros: { r: 81494, w: 96119, R: 66331, W: 42462 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 560ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.197-0400 m31100| 2015-07-09T13:56:02.196-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_69 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.236-0400 m31100| 2015-07-09T13:56:02.235-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464561_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.239-0400 m31100| 2015-07-09T13:56:02.239-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_64 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.239-0400 m31100| 2015-07-09T13:56:02.239-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_64 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.240-0400 m31100| 2015-07-09T13:56:02.240-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_64 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.245-0400 m31100| 2015-07-09T13:56:02.245-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464561_13 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.245-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.246-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.246-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.246-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464561_13", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 6180, w: 16903, W: 4263 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 9, w: 35, R: 12, W: 6 }, timeAcquiringMicros: { r: 75071, w: 121243, R: 18196, W: 95741 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 526ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.251-0400 m31100| 2015-07-09T13:56:02.251-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_70 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.279-0400 m31100| 2015-07-09T13:56:02.279-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.283-0400 m31100| 
2015-07-09T13:56:02.283-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_67 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.283-0400 m31100| 2015-07-09T13:56:02.283-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_67 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.284-0400 m31100| 2015-07-09T13:56:02.283-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_67 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.291-0400 m31100| 2015-07-09T13:56:02.290-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.291-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.291-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.291-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.291-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.291-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.293-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464561_19", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464561_19", timeMillis: 580, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|47, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464561_19", timeMillis: 143, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|111, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 17332, W: 16089 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 18, W: 5 }, timeAcquiringMicros: { w: 101888, W: 4753 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 236ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.293-0400 m31100| 2015-07-09T13:56:02.291-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.293-0400 m31200| 2015-07-09T13:56:02.292-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.294-0400 m31100| 2015-07-09T13:56:02.293-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.299-0400 m31100| 2015-07-09T13:56:02.299-0400 I COMMAND [conn48] CMD: drop 
db6.tmp.mr.coll6_68 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.300-0400 m31100| 2015-07-09T13:56:02.300-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_68 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.301-0400 m31100| 2015-07-09T13:56:02.301-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_68 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.303-0400 m30999| 2015-07-09T13:56:02.302-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.303-0400 m30999| 2015-07-09T13:56:02.302-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.303-0400 m31100| 2015-07-09T13:56:02.302-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.303-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.303-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.303-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.304-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.304-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.306-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464561_12", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464561_12", timeMillis: 441, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|57, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464561_12", timeMillis: 292, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|138, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 17071 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 15, W: 5 }, timeAcquiringMicros: { w: 71520, W: 68628 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 236ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.306-0400 m31100| 2015-07-09T13:56:02.302-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.306-0400 m31202| 2015-07-09T13:56:02.304-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:02.306-0400 m31201| 2015-07-09T13:56:02.304-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.307-0400 m31100| 2015-07-09T13:56:02.305-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.310-0400 m31200| 2015-07-09T13:56:02.310-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.313-0400 m31100| 2015-07-09T13:56:02.313-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_71 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.318-0400 m31101| 2015-07-09T13:56:02.317-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.321-0400 m31200| 2015-07-09T13:56:02.320-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_36 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.321-0400 m31201| 2015-07-09T13:56:02.320-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.322-0400 m31102| 2015-07-09T13:56:02.322-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464561_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.324-0400 m30998| 2015-07-09T13:56:02.323-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.324-0400 m30998| 2015-07-09T13:56:02.323-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.324-0400 m31202| 2015-07-09T13:56:02.323-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.324-0400 m31100| 2015-07-09T13:56:02.324-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.337-0400 m31101| 2015-07-09T13:56:02.337-0400 I COMMAND [repl writer worker 4] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.341-0400 m31101| 2015-07-09T13:56:02.341-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.345-0400 m31102| 2015-07-09T13:56:02.345-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.350-0400 m31102| 2015-07-09T13:56:02.349-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464561_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.351-0400 m31101| 2015-07-09T13:56:02.351-0400 I COMMAND [repl writer worker 3] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.356-0400 m31200| 2015-07-09T13:56:02.356-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_37 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.359-0400 m31102| 2015-07-09T13:56:02.359-0400 I COMMAND [repl writer worker 4] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.363-0400 m31100| 2015-07-09T13:56:02.362-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_72 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.378-0400 m31100| 2015-07-09T13:56:02.377-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.392-0400 m31100| 2015-07-09T13:56:02.392-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.393-0400 m31100| 2015-07-09T13:56:02.392-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.412-0400 m31100| 2015-07-09T13:56:02.411-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.419-0400 m31100| 2015-07-09T13:56:02.418-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.420-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.420-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.420-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.420-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.420-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.422-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464561_20", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464561_20", timeMillis: 539, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|98, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464561_20", timeMillis: 315, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|162, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 17382, W: 15836 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 14, W: 5 }, timeAcquiringMicros: { w: 73076, W: 30922 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 222ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.422-0400 m31100| 2015-07-09T13:56:02.419-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464561_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.442-0400 m31200| 2015-07-09T13:56:02.442-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464561_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.458-0400 m30999| 2015-07-09T13:56:02.457-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.458-0400 m30999| 2015-07-09T13:56:02.457-0400 I COMMAND [conn44] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.458-0400 m31100| 2015-07-09T13:56:02.458-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.461-0400 m31202| 2015-07-09T13:56:02.460-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464561_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.463-0400 m31101| 2015-07-09T13:56:02.462-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464561_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.464-0400 m31201| 2015-07-09T13:56:02.464-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464561_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.470-0400 m31101| 2015-07-09T13:56:02.470-0400 I COMMAND [repl writer worker 6] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.470-0400 m31200| 2015-07-09T13:56:02.470-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.471-0400 m31102| 2015-07-09T13:56:02.470-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464561_20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.474-0400 m31100| 2015-07-09T13:56:02.473-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.474-0400 m31100| 2015-07-09T13:56:02.474-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.477-0400 m31102| 2015-07-09T13:56:02.477-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.477-0400 m31100| 2015-07-09T13:56:02.477-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_70
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.478-0400 m31100| 2015-07-09T13:56:02.477-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_70
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.478-0400 m31100| 2015-07-09T13:56:02.477-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_70
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.480-0400 m31100| 2015-07-09T13:56:02.478-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.480-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.480-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.480-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.480-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.481-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.482-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464561_13", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464561_13", timeMillis: 520, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|125, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464561_13", timeMillis: 251, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464561000|180, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 36561, W: 443 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 120444, W: 316 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 229ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.482-0400 m31100| 2015-07-09T13:56:02.478-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464561_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.499-0400 m31200| 2015-07-09T13:56:02.497-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464561_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.500-0400 m31101| 2015-07-09T13:56:02.500-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464561_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.500-0400 m31102| 2015-07-09T13:56:02.500-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464561_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.515-0400 m30998| 2015-07-09T13:56:02.515-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.515-0400 m30998| 2015-07-09T13:56:02.515-0400 I COMMAND [conn43] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.515-0400 m31100| 2015-07-09T13:56:02.515-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.518-0400 m31202| 2015-07-09T13:56:02.517-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464561_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.519-0400 m31201| 2015-07-09T13:56:02.518-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464561_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.528-0400 m31101| 2015-07-09T13:56:02.528-0400 I COMMAND [repl writer worker 13] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.530-0400 m31102| 2015-07-09T13:56:02.529-0400 I COMMAND [repl writer worker 5] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.552-0400 m31200| 2015-07-09T13:56:02.551-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.561-0400 m31100| 2015-07-09T13:56:02.560-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.660-0400 m31100| 2015-07-09T13:56:02.659-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464562_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.662-0400 m31100| 2015-07-09T13:56:02.662-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_66
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.663-0400 m31100| 2015-07-09T13:56:02.663-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_66
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.683-0400 m31100| 2015-07-09T13:56:02.683-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_66
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.689-0400 m31100| 2015-07-09T13:56:02.688-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464562_21 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.689-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.689-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.689-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.689-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_21", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 12643, w: 25851, W: 17742 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 16, w: 24, R: 12, W: 9 }, timeAcquiringMicros: { r: 115128, w: 174898, R: 43076, W: 49918 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 639ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.690-0400 m31100| 2015-07-09T13:56:02.690-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_75
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.717-0400 m31200| 2015-07-09T13:56:02.716-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.724-0400 m31200| 2015-07-09T13:56:02.724-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.724-0400 m31200| 2015-07-09T13:56:02.724-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.732-0400 m31200| 2015-07-09T13:56:02.732-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.737-0400 m31200| 2015-07-09T13:56:02.737-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.741-0400 m31100| 2015-07-09T13:56:02.740-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.746-0400 m31200| 2015-07-09T13:56:02.745-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.747-0400 m31100| 2015-07-09T13:56:02.747-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_71
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.748-0400 m31100| 2015-07-09T13:56:02.747-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_71
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.748-0400 m31200| 2015-07-09T13:56:02.748-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.754-0400 m31200| 2015-07-09T13:56:02.753-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464562_22 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.754-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.755-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.755-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.756-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_22", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2503 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 21, R: 9, W: 4 }, timeAcquiringMicros: { r: 7706, w: 160779, R: 13233, W: 4605 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 441ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.757-0400 m31200| 2015-07-09T13:56:02.757-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.758-0400 m31100| 2015-07-09T13:56:02.758-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_71
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.760-0400 m31100| 2015-07-09T13:56:02.760-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464562_22 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.760-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.760-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.760-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.761-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_22", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 15993, w: 10654, W: 19 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 23, R: 13, W: 9 }, timeAcquiringMicros: { r: 13246, w: 112772, R: 66183, W: 17930 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 447ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.761-0400 m31100| 2015-07-09T13:56:02.761-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_76
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.761-0400 m31100| 2015-07-09T13:56:02.761-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.765-0400 m31200| 2015-07-09T13:56:02.765-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464562_14 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.765-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.765-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.765-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.766-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 8541, W: 4519 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 27, R: 6, W: 5 }, timeAcquiringMicros: { r: 13504, w: 192212, R: 26100, W: 31943 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 422ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.766-0400 m31100| 2015-07-09T13:56:02.766-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.767-0400 m31100| 2015-07-09T13:56:02.766-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.769-0400 m31100| 2015-07-09T13:56:02.768-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.769-0400 m31100| 2015-07-09T13:56:02.769-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.775-0400 m31100| 2015-07-09T13:56:02.774-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_72
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.775-0400 m31100| 2015-07-09T13:56:02.774-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_72
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.776-0400 m31100| 2015-07-09T13:56:02.776-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_72
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.782-0400 m31200| 2015-07-09T13:56:02.781-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.790-0400 m31200| 2015-07-09T13:56:02.790-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.791-0400 m31200| 2015-07-09T13:56:02.790-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.792-0400 m31200| 2015-07-09T13:56:02.791-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.793-0400 m31200| 2015-07-09T13:56:02.792-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464562_23 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.793-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.794-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.794-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.796-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_23", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 25293, W: 325 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 12, R: 12, W: 6 }, timeAcquiringMicros: { w: 74262, R: 32315, W: 20887 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 324ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.799-0400 m31200| 2015-07-09T13:56:02.799-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464562_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.806-0400 m31100| 2015-07-09T13:56:02.805-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464562_23 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.806-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.806-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.806-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.808-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_23", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 11755, w: 9638, W: 1253 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 4, w: 17, R: 10, W: 6 }, timeAcquiringMicros: { r: 8751, w: 68250, R: 28982, W: 39709 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 337ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.808-0400 m31100| 2015-07-09T13:56:02.807-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464562_14 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.808-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.809-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.809-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.809-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_14", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 48985, W: 3080 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 23, R: 11, W: 5 }, timeAcquiringMicros: { r: 19807, w: 113747, R: 64381, W: 32448 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 465ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.810-0400 m31100| 2015-07-09T13:56:02.808-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_77
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.810-0400 m31200| 2015-07-09T13:56:02.808-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_39
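(Editor's note: the mapReduce entries with shardedFirstPass: true above are the per-shard first phase of a sharded map-reduce. mongos has each shard run the job into a temporary collection named tmp.mrs.coll6_<epoch>_<n>, then issues mapreduce.shardedfinish to merge the per-shard results into the output collection; the surrounding CMD: drop lines are the cleanup of the tmp.mr.* scratch and tmp.mrs.* result collections, which the secondaries (m31101/m31102, m31201/m31202) replay. A sketch of the first-phase command as a shard primary sees it, with field values copied from the entry above; note that shardedFirstPass is an internal flag set by mongos, not a public option:)

    // First-pass command as logged on the shard primaries (m31100/m31200).
    db.runCommand({
        mapreduce: "coll6",
        map: mapper,     // same workload functions as in the earlier sketch
        reduce: reducer,
        query: { key: { $exists: true }, value: { $exists: true } },
        out: "tmp.mrs.coll6_1436464562_21",  // temp collection, later merged and dropped
        shardedFirstPass: true               // internal flag set by mongos
    });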
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.810-0400 m31100| 2015-07-09T13:56:02.809-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_78
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.812-0400 m31200| 2015-07-09T13:56:02.810-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.812-0400 m31200| 2015-07-09T13:56:02.811-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.812-0400 m31200| 2015-07-09T13:56:02.812-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464562_15 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.813-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.813-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.813-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.814-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_15", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 13469, w: 9793 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 3, R: 12, W: 5 }, timeAcquiringMicros: { r: 58, w: 8209, R: 51987, W: 9634 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 267ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.847-0400 m31100| 2015-07-09T13:56:02.847-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.851-0400 m31100| 2015-07-09T13:56:02.851-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_75
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.855-0400 m31100| 2015-07-09T13:56:02.855-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_75
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.856-0400 m31100| 2015-07-09T13:56:02.855-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_75
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.858-0400 m31100| 2015-07-09T13:56:02.858-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.859-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.859-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.859-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.859-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.859-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.861-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464562_21", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464562_21", timeMillis: 614, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|217, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464562_21", timeMillis: 131, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|23, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 18642, W: 9375 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 14, W: 5 }, timeAcquiringMicros: { w: 78636, W: 1787 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 167ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.862-0400 m31100| 2015-07-09T13:56:02.858-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464562_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.863-0400 m31200| 2015-07-09T13:56:02.862-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464562_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.867-0400 m30999| 2015-07-09T13:56:02.867-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.867-0400 m30999| 2015-07-09T13:56:02.867-0400 I COMMAND [conn43] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.868-0400 m31100| 2015-07-09T13:56:02.867-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.868-0400 m31201| 2015-07-09T13:56:02.867-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464562_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.869-0400 m31202| 2015-07-09T13:56:02.868-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464562_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.870-0400 m31100| 2015-07-09T13:56:02.870-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464562_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.875-0400 m31100| 2015-07-09T13:56:02.874-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.876-0400 m31100| 2015-07-09T13:56:02.875-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.878-0400 m31200| 2015-07-09T13:56:02.878-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.880-0400 m31100| 2015-07-09T13:56:02.879-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.884-0400 m31100| 2015-07-09T13:56:02.883-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464562_15 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.884-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.885-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.885-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.886-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_15", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 3 }, timeAcquiringMicros: { r: 6312, w: 11387 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 15, R: 11, W: 8 }, timeAcquiringMicros: { r: 2773, w: 79575, R: 31412, W: 39350 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 338ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.886-0400 m31100| 2015-07-09T13:56:02.884-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_79
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.886-0400 m31100| 2015-07-09T13:56:02.886-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_80
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.900-0400 m31101| 2015-07-09T13:56:02.898-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464562_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.906-0400 m31102| 2015-07-09T13:56:02.906-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464562_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.913-0400 m31101| 2015-07-09T13:56:02.912-0400 I COMMAND [repl writer worker 6] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.921-0400 m31102| 2015-07-09T13:56:02.920-0400 I COMMAND [repl writer worker 3] CMD: drop db6.map_reduce_replace_nonexistent2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.974-0400 m31100| 2015-07-09T13:56:02.974-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.978-0400 m31100| 2015-07-09T13:56:02.978-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_76
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.979-0400 m31100| 2015-07-09T13:56:02.979-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_76
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.985-0400 m31100| 2015-07-09T13:56:02.985-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_76
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.994-0400 m31100| 2015-07-09T13:56:02.993-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.994-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.994-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.995-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.995-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.995-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.997-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464562_22", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464562_22", timeMillis: 434, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|277, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464562_22", timeMillis: 412, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|70, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 33282, W: 16674 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 11, W: 5 }, timeAcquiringMicros: { w: 90134, W: 8713 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 232ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:02.997-0400 m31100| 2015-07-09T13:56:02.994-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.001-0400 m31200| 2015-07-09T13:56:03.001-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.002-0400 m31100| 2015-07-09T13:56:03.002-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.006-0400 m30999| 2015-07-09T13:56:03.005-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.006-0400 m30999| 2015-07-09T13:56:03.006-0400 I COMMAND [conn42] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.006-0400 m31202| 2015-07-09T13:56:03.006-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.006-0400 m31201| 2015-07-09T13:56:03.006-0400 I COMMAND [repl writer worker 0] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.007-0400 m31100| 2015-07-09T13:56:03.007-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.008-0400 m31100| 2015-07-09T13:56:03.007-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_78
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.008-0400 m31100| 2015-07-09T13:56:03.008-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_78
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.010-0400 m31101| 2015-07-09T13:56:03.010-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.010-0400 m31102| 2015-07-09T13:56:03.010-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464562_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.011-0400 m31100| 2015-07-09T13:56:03.010-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_78
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.011-0400 m31100| 2015-07-09T13:56:03.011-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.011-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.012-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.012-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.012-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.012-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.013-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464562_14", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464562_14", timeMillis: 432, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|293, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464562_14", timeMillis: 405, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|73, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 7379, W: 18573 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 12, W: 4 }, timeAcquiringMicros: { w: 86549, W: 26969 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 202ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.013-0400 m31100| 2015-07-09T13:56:03.011-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.018-0400 m31101| 2015-07-09T13:56:03.017-0400 I COMMAND [repl writer worker 7] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.021-0400 m31200| 2015-07-09T13:56:03.021-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.026-0400 m31200| 2015-07-09T13:56:03.025-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.026-0400 m30998| 2015-07-09T13:56:03.026-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.026-0400 m30998| 2015-07-09T13:56:03.026-0400 I COMMAND [conn42] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.027-0400 m31100| 2015-07-09T13:56:03.026-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.027-0400 m31101| 2015-07-09T13:56:03.026-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.028-0400 m31202| 2015-07-09T13:56:03.028-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.029-0400 m31100| 2015-07-09T13:56:03.029-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_81
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.029-0400 m31102| 2015-07-09T13:56:03.029-0400 I COMMAND [repl writer worker 12] CMD: drop db6.map_reduce_replace_nonexistent0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.030-0400 m31201| 2015-07-09T13:56:03.029-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.035-0400 m31102| 2015-07-09T13:56:03.034-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464562_14
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.040-0400 m31100| 2015-07-09T13:56:03.040-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.047-0400 m31100| 2015-07-09T13:56:03.046-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_77
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.047-0400 m31100| 2015-07-09T13:56:03.047-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_77
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.048-0400 m31101| 2015-07-09T13:56:03.048-0400 I COMMAND [repl writer worker 6] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.048-0400 m31100| 2015-07-09T13:56:03.048-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_77
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.049-0400 m31102| 2015-07-09T13:56:03.048-0400 I COMMAND [repl writer worker 14] CMD: drop db6.map_reduce_replace_nonexistent3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.049-0400 m31100| 2015-07-09T13:56:03.048-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.050-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.050-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.050-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.050-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.050-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.052-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464562_23", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464562_23", timeMillis: 297, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|290, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464562_23", timeMillis: 322, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|94, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 14079, W: 11660 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 15, W: 4 }, timeAcquiringMicros: { w: 105458, W: 4324 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 240ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.052-0400 m31100| 2015-07-09T13:56:03.049-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.057-0400 m31200| 2015-07-09T13:56:03.057-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.073-0400 m31100| 2015-07-09T13:56:03.072-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_82
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.073-0400 m31200| 2015-07-09T13:56:03.073-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.078-0400 m31101| 2015-07-09T13:56:03.076-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.079-0400 m31102| 2015-07-09T13:56:03.079-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.091-0400 m31200| 2015-07-09T13:56:03.091-0400 I COMMAND [conn39] CMD: drop db6.tmp.mrs.coll6_1436464562_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.092-0400 m30999| 2015-07-09T13:56:03.092-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.093-0400 m30999| 2015-07-09T13:56:03.092-0400 I COMMAND [conn44] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.093-0400 m31100| 2015-07-09T13:56:03.092-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.096-0400 m31200| 2015-07-09T13:56:03.095-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.096-0400 m31200| 2015-07-09T13:56:03.095-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.096-0400 m31202| 2015-07-09T13:56:03.096-0400 I COMMAND [repl writer worker 4] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.097-0400 m31201| 2015-07-09T13:56:03.097-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464562_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.098-0400 m31200| 2015-07-09T13:56:03.098-0400 I COMMAND [conn39] CMD: drop db6.tmp.mr.coll6_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.107-0400 m31101| 2015-07-09T13:56:03.106-0400 I COMMAND [repl writer worker 4] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.109-0400 m31102| 2015-07-09T13:56:03.109-0400 I COMMAND [repl writer worker 2] CMD: drop db6.map_reduce_replace_nonexistent4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.141-0400 m31200| 2015-07-09T13:56:03.140-0400 I COMMAND [conn39] command db6.tmp.mrs.coll6_1436464562_24 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.141-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.141-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.142-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.142-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_24", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 7207 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 6, W: 2 }, timeAcquiringMicros: { r: 44820, w: 15230, W: 42298 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 263ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.147-0400 m31200| 2015-07-09T13:56:03.145-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.152-0400 m31100| 2015-07-09T13:56:03.150-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_83
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.245-0400 m31100| 2015-07-09T13:56:03.245-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.249-0400 m31100| 2015-07-09T13:56:03.249-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_80
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.249-0400 m31100| 2015-07-09T13:56:03.249-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_80
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.249-0400 m31100| 2015-07-09T13:56:03.249-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_80
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.250-0400 m31100| 2015-07-09T13:56:03.250-0400 I COMMAND [conn58] command db6.map_reduce_replace_nonexistent1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.250-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.250-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.250-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.251-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.251-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.253-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent1" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464562_15", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464562_15", timeMillis: 330, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|355, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464562_15", timeMillis: 264, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464562000|115, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 26562, W: 31812 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 21, W: 3 }, timeAcquiringMicros: { w: 196276, W: 45353 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 364ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.253-0400 m31100| 2015-07-09T13:56:03.250-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464562_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.265-0400 m31200| 2015-07-09T13:56:03.263-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464562_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.271-0400 m31201| 2015-07-09T13:56:03.270-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464562_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.271-0400 m30998| 2015-07-09T13:56:03.271-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.271-0400 m30998| 2015-07-09T13:56:03.271-0400 I COMMAND [conn43] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.274-0400 m31100| 2015-07-09T13:56:03.271-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.274-0400 m31202| 2015-07-09T13:56:03.272-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464562_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.275-0400 m31101| 2015-07-09T13:56:03.275-0400 I COMMAND [repl writer worker 15] CMD: drop db6.tmp.mrs.coll6_1436464562_15
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.282-0400 m31102| 2015-07-09T13:56:03.282-0400 I COMMAND [repl writer worker 9] CMD: drop db6.tmp.mrs.coll6_1436464562_15
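(Editor's note: the per-shard shardCounts in these shardedfinish entries reconcile with the totals reported to mongos: input 970 + 1030 = 2000, emit 970 + 1030 = 2000, reduce 80 + 80 = 160, and output 20 + 20 = 40 documents merged into the final replace collection. Most of each command's reported latency here is lock waiting rather than map-reduce work: in the 364ms shardedfinish above, timeAcquiringMicros shows roughly 196ms (w: 196276) plus 45ms (W: 45353) spent waiting on the database lock alone.)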
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.288-0400 m31102| 2015-07-09T13:56:03.288-0400 I COMMAND [repl writer worker 1] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.288-0400 m31101| 2015-07-09T13:56:03.288-0400 I COMMAND [repl writer worker 3] CMD: drop db6.map_reduce_replace_nonexistent1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.301-0400 m31100| 2015-07-09T13:56:03.300-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_84
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.313-0400 m31200| 2015-07-09T13:56:03.313-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_44
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.382-0400 m31100| 2015-07-09T13:56:03.382-0400 I COMMAND [conn57] CMD: drop db6.tmp.mrs.coll6_1436464562_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.385-0400 m31100| 2015-07-09T13:56:03.385-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_79
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.385-0400 m31100| 2015-07-09T13:56:03.385-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_79
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.386-0400 m31100| 2015-07-09T13:56:03.386-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_79
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.390-0400 m31200| 2015-07-09T13:56:03.390-0400 I COMMAND [conn31] CMD: drop db6.tmp.mrs.coll6_1436464563_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.395-0400 m31200| 2015-07-09T13:56:03.394-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.395-0400 m31200| 2015-07-09T13:56:03.395-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.397-0400 m31200| 2015-07-09T13:56:03.397-0400 I COMMAND [conn31] CMD: drop db6.tmp.mr.coll6_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.399-0400 m31100| 2015-07-09T13:56:03.398-0400 I COMMAND [conn57] command db6.tmp.mrs.coll6_1436464562_24 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.399-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.399-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.399-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.400-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464562_24", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 13629, w: 24378, W: 60 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 24, R: 9, W: 6 }, timeAcquiringMicros: { r: 20920, w: 224284, R: 35745, W: 13056 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 520ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.401-0400 m31100| 2015-07-09T13:56:03.399-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_85
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.409-0400 m31200| 2015-07-09T13:56:03.409-0400 I COMMAND [conn31] command db6.tmp.mrs.coll6_1436464563_25 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.409-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.409-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.410-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.410-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_25", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 7508, W: 8104 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 4, w: 16, R: 10, W: 7 }, timeAcquiringMicros: { r: 6454, w: 100153, R: 57667, W: 12448 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 387ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.424-0400 m31200| 2015-07-09T13:56:03.424-0400 I COMMAND [conn33] CMD: drop db6.tmp.mrs.coll6_1436464563_16
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.426-0400 m31100| 2015-07-09T13:56:03.425-0400 I COMMAND [conn52] CMD: drop db6.tmp.mrs.coll6_1436464563_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.431-0400 m31200| 2015-07-09T13:56:03.430-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.433-0400 m31200| 2015-07-09T13:56:03.430-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.433-0400 m31200| 2015-07-09T13:56:03.432-0400 I COMMAND [conn33] CMD: drop db6.tmp.mr.coll6_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.439-0400 m31100| 2015-07-09T13:56:03.437-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_81
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.440-0400 m31100| 2015-07-09T13:56:03.437-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_81
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.447-0400 m31100| 2015-07-09T13:56:03.446-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_81
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.448-0400 m31200| 2015-07-09T13:56:03.448-0400 I COMMAND [conn33] command db6.tmp.mrs.coll6_1436464563_16 command: mapReduce { mapreduce: "coll6", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.449-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.449-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.449-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.450-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 18620, W: 515 } }, Database: { acquireCount: { r: 25,
w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 5, w: 16, R: 10, W: 6 }, timeAcquiringMicros: { r: 35899, w: 72668, R: 35179, W: 30535 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 398ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.458-0400 m31100| 2015-07-09T13:56:03.458-0400 I COMMAND [conn52] command db6.tmp.mrs.coll6_1436464563_25 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.458-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.459-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.459-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.459-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_25", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 5592, w: 14713, W: 530 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 7, w: 16, R: 11, W: 8 }, timeAcquiringMicros: { r: 29320, w: 84442, R: 67000, W: 35216 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 436ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.462-0400 m31100| 2015-07-09T13:56:03.462-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_86 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.485-0400 m31100| 2015-07-09T13:56:03.484-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464563_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.488-0400 m31200| 2015-07-09T13:56:03.488-0400 I COMMAND [conn40] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.491-0400 m31100| 2015-07-09T13:56:03.491-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_82 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.491-0400 m31100| 2015-07-09T13:56:03.491-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_82 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.492-0400 m31100| 2015-07-09T13:56:03.492-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_82 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.494-0400 m31100| 2015-07-09T13:56:03.493-0400 I COMMAND [conn48] command db6.tmp.mrs.coll6_1436464563_16 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.494-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.494-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.494-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.495-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_16", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 28623, w: 2862, 
W: 8811 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 20, R: 10, W: 7 }, timeAcquiringMicros: { r: 26490, w: 128177, R: 34337, W: 30473 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 443ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.496-0400 m31100| 2015-07-09T13:56:03.494-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_87 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.498-0400 m31200| 2015-07-09T13:56:03.498-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.499-0400 m31200| 2015-07-09T13:56:03.498-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.504-0400 m31200| 2015-07-09T13:56:03.504-0400 I COMMAND [conn40] CMD: drop db6.tmp.mr.coll6_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.505-0400 m31200| 2015-07-09T13:56:03.505-0400 I COMMAND [conn40] command db6.tmp.mrs.coll6_1436464563_26 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.505-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.506-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.506-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.507-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_26", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 6852, w: 5876, W: 448 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 17, R: 9, W: 8 }, timeAcquiringMicros: { r: 9489, w: 94852, R: 8745, W: 21194 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 362ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.516-0400 m31200| 2015-07-09T13:56:03.515-0400 I COMMAND [conn36] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.527-0400 m31200| 2015-07-09T13:56:03.526-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.527-0400 m31200| 2015-07-09T13:56:03.526-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.528-0400 m31200| 2015-07-09T13:56:03.528-0400 I COMMAND [conn36] CMD: drop db6.tmp.mr.coll6_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.530-0400 m31200| 2015-07-09T13:56:03.529-0400 I COMMAND [conn36] command db6.tmp.mrs.coll6_1436464563_17 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.530-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.530-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.530-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.531-0400 m31200| values...., query: { key: { 
$exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 7783, w: 10651 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 5, R: 12, W: 6 }, timeAcquiringMicros: { w: 6017, R: 19658, W: 15124 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 238ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.543-0400 m31100| 2015-07-09T13:56:03.542-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.548-0400 m31100| 2015-07-09T13:56:03.548-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_85 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.549-0400 m31100| 2015-07-09T13:56:03.548-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_85 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.550-0400 m31100| 2015-07-09T13:56:03.549-0400 I COMMAND [conn57] CMD: drop db6.tmp.mr.coll6_85 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.551-0400 m31100| 2015-07-09T13:56:03.550-0400 I COMMAND [conn57] command db6.map_reduce_replace_nonexistent2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.551-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.551-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.551-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.551-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.551-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.553-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent2" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464562_24", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464562_24", timeMillis: 508, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|66, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464562_24", timeMillis: 218, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|25, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 18346, W: 1388 } }, Database: { acquireCount: { r: 1, w: 
44, W: 7 }, acquireWaitCount: { w: 13, W: 5 }, timeAcquiringMicros: { w: 54437, W: 9712 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.554-0400 m31100| 2015-07-09T13:56:03.550-0400 I COMMAND [conn59] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.554-0400 m31100| 2015-07-09T13:56:03.551-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464562_24 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.556-0400 m31100| 2015-07-09T13:56:03.555-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_83 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.557-0400 m31100| 2015-07-09T13:56:03.556-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_83 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.561-0400 m31200| 2015-07-09T13:56:03.560-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464562_24 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.564-0400 m31100| 2015-07-09T13:56:03.563-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_83 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.567-0400 m31202| 2015-07-09T13:56:03.567-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464562_24 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.568-0400 m31100| 2015-07-09T13:56:03.567-0400 I COMMAND [conn59] command db6.tmp.mrs.coll6_1436464563_26 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.568-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.569-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.569-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.570-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_26", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:211 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 3, W: 1 }, timeAcquiringMicros: { r: 26576, w: 17209, W: 609 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 12, R: 11, W: 8 }, timeAcquiringMicros: { r: 40825, w: 45312, R: 51763, W: 48214 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 426ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.570-0400 m30999| 2015-07-09T13:56:03.567-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.570-0400 m30999| 2015-07-09T13:56:03.568-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.570-0400 m31100| 2015-07-09T13:56:03.568-0400 I COMMAND [conn57] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.571-0400 m31100| 2015-07-09T13:56:03.569-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_88 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.571-0400 m31201| 2015-07-09T13:56:03.569-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464562_24 
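The exchange above is the two-phase protocol behind a sharded mapReduce: each shard first runs the job locally (the mapReduce commands with shardedFirstPass: true) and writes its partial results to a temporary tmp.mrs.coll6_* collection, after which mongos asks the primary shard of db6 to run mapreduce.shardedfinish, merging the per-shard partials into the { replace: "map_reduce_replace_nonexistentN" } output before the temporaries are dropped everywhere. A minimal sketch of the call the map_reduce_replace_nonexistent workload is driving through mongos; the function bodies are truncated in the log ("values...."), so the emit and merge logic below is an assumption for illustration only:

    // Sketch only: the log elides the map/reduce bodies, so the
    // emit payload and the accumulation in the reducer are assumed.
    var mapper = function() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, { count: 1 });  // assumed payload shape
        }
    };
    var reducer = function(key, values) {
        // Return the same shape that mapper emits so re-reduce is safe
        // (the log shows reduce running repeatedly: reduce: 80 per shard).
        var res = { count: 0 };
        values.forEach(function(v) { res.count += v.count; });
        return res;
    };
    db.coll6.mapReduce(mapper, reducer, {
        finalize: function finalizer(key, reducedValue) { return reducedValue; },
        out: { replace: "map_reduce_replace_nonexistent1" },
        query: { key: { $exists: true }, value: { $exists: true } }
    });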
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.573-0400 m31102| 2015-07-09T13:56:03.572-0400 I COMMAND [repl writer worker 3] CMD: drop db6.tmp.mrs.coll6_1436464562_24 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.573-0400 m31101| 2015-07-09T13:56:03.573-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464562_24 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.579-0400 m31101| 2015-07-09T13:56:03.579-0400 I COMMAND [repl writer worker 12] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.581-0400 m31102| 2015-07-09T13:56:03.580-0400 I COMMAND [repl writer worker 12] CMD: drop db6.map_reduce_replace_nonexistent2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.581-0400 m30999| 2015-07-09T13:56:03.581-0400 I NETWORK [conn43] end connection 127.0.0.1:62759 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.596-0400 m31100| 2015-07-09T13:56:03.596-0400 I COMMAND [conn58] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.603-0400 m31100| 2015-07-09T13:56:03.603-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.603-0400 m31100| 2015-07-09T13:56:03.603-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.604-0400 m31100| 2015-07-09T13:56:03.604-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.607-0400 m31100| 2015-07-09T13:56:03.607-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_86 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.607-0400 m31100| 2015-07-09T13:56:03.607-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_86 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.607-0400 m31100| 2015-07-09T13:56:03.607-0400 I COMMAND [conn52] CMD: drop db6.tmp.mr.coll6_86 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.608-0400 m31100| 2015-07-09T13:56:03.608-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.609-0400 m31100| 2015-07-09T13:56:03.608-0400 I COMMAND [conn52] command db6.map_reduce_replace_nonexistent0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.609-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.609-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.609-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.609-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.609-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.612-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent0" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464563_25", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464563_25", timeMillis: 416, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|87, electionId: 
ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464563_25", timeMillis: 373, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|61, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 23253, W: 697 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 10, W: 5 }, timeAcquiringMicros: { w: 53434, W: 1700 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.612-0400 m31100| 2015-07-09T13:56:03.609-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464563_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.613-0400 m31200| 2015-07-09T13:56:03.611-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464563_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.614-0400 m31100| 2015-07-09T13:56:03.614-0400 I COMMAND [conn58] command db6.tmp.mrs.coll6_1436464563_17 command: mapReduce { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.615-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.615-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.615-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.616-0400 m30999| 2015-07-09T13:56:03.614-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.616-0400 m30999| 2015-07-09T13:56:03.614-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.617-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, out: "tmp.mrs.coll6_1436464563_17", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:211 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 4 }, timeAcquiringMicros: { r: 7486, w: 16065 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 10, R: 11, W: 8 }, timeAcquiringMicros: { r: 13127, w: 52082, R: 24071, W: 23675 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 322ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.617-0400 m31100| 2015-07-09T13:56:03.614-0400 I COMMAND [conn52] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.617-0400 m31202| 2015-07-09T13:56:03.615-0400 I COMMAND [repl writer worker 14] CMD: drop db6.tmp.mrs.coll6_1436464563_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.617-0400 m31100| 
2015-07-09T13:56:03.615-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_89 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.618-0400 m31201| 2015-07-09T13:56:03.617-0400 I COMMAND [repl writer worker 11] CMD: drop db6.tmp.mrs.coll6_1436464563_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.624-0400 m30999| 2015-07-09T13:56:03.623-0400 I NETWORK [conn42] end connection 127.0.0.1:62757 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.628-0400 m31101| 2015-07-09T13:56:03.627-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464563_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.631-0400 m31102| 2015-07-09T13:56:03.630-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464563_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.631-0400 m31101| 2015-07-09T13:56:03.631-0400 I COMMAND [repl writer worker 7] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.634-0400 m31102| 2015-07-09T13:56:03.633-0400 I COMMAND [repl writer worker 10] CMD: drop db6.map_reduce_replace_nonexistent0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.648-0400 m31100| 2015-07-09T13:56:03.647-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.654-0400 m31100| 2015-07-09T13:56:03.653-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_87 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.654-0400 m31100| 2015-07-09T13:56:03.653-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_87 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.654-0400 m31100| 2015-07-09T13:56:03.654-0400 I COMMAND [conn48] CMD: drop db6.tmp.mr.coll6_87 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.656-0400 m31100| 2015-07-09T13:56:03.655-0400 I COMMAND [conn48] command db6.map_reduce_replace_nonexistent3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.656-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.657-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.657-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.657-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.657-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.659-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent3" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464563_16", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464563_16", timeMillis: 441, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|115, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464563_16", timeMillis: 380, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|75, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { 
test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 17901, W: 233 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 9, W: 5 }, timeAcquiringMicros: { w: 49276, W: 8579 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.660-0400 m31100| 2015-07-09T13:56:03.656-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464563_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.660-0400 m31200| 2015-07-09T13:56:03.659-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464563_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.662-0400 m30998| 2015-07-09T13:56:03.662-0400 I COMMAND [conn42] DROP: db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.663-0400 m30998| 2015-07-09T13:56:03.662-0400 I COMMAND [conn42] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.663-0400 m31100| 2015-07-09T13:56:03.662-0400 I COMMAND [conn48] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.663-0400 m31201| 2015-07-09T13:56:03.663-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464563_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.663-0400 m31202| 2015-07-09T13:56:03.663-0400 I COMMAND [repl writer worker 1] CMD: drop db6.tmp.mrs.coll6_1436464563_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.668-0400 m31100| 2015-07-09T13:56:03.667-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.671-0400 m30998| 2015-07-09T13:56:03.670-0400 I NETWORK [conn42] end connection 127.0.0.1:62758 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.671-0400 m31101| 2015-07-09T13:56:03.670-0400 I COMMAND [repl writer worker 10] CMD: drop db6.tmp.mrs.coll6_1436464563_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.676-0400 m31102| 2015-07-09T13:56:03.675-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464563_16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.677-0400 m31100| 2015-07-09T13:56:03.676-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_88 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.677-0400 m31100| 2015-07-09T13:56:03.676-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_88 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.678-0400 m31100| 2015-07-09T13:56:03.676-0400 I COMMAND [conn59] CMD: drop db6.tmp.mr.coll6_88 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.678-0400 m31100| 2015-07-09T13:56:03.676-0400 I COMMAND [conn59] command db6.map_reduce_replace_nonexistent4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll6", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.678-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: 
function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.678-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.678-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.679-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.679-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.680-0400 m31100| }, out: { replace: "map_reduce_replace_nonexistent4" }, query: { key: { $exists: true }, value: { $exists: true } } }, inputDB: "db6", shardedOutputCollection: "tmp.mrs.coll6_1436464563_26", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll6_1436464563_26", timeMillis: 415, counts: { input: 970, emit: 970, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|162, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll6_1436464563_26", timeMillis: 355, counts: { input: 1030, emit: 1030, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464563000|92, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 970, emit: 970, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1030, emit: 1030, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:247 locks:{ Global: { acquireCount: { r: 54, w: 49, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 15945, W: 1277 } }, Database: { acquireCount: { r: 1, w: 44, W: 7 }, acquireWaitCount: { w: 7, W: 3 }, timeAcquiringMicros: { w: 27962, W: 9508 } }, Collection: { acquireCount: { r: 1, w: 24 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.680-0400 m31100| 2015-07-09T13:56:03.677-0400 I COMMAND [conn15] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.680-0400 m31200| 2015-07-09T13:56:03.678-0400 I COMMAND [conn48] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.680-0400 m31101| 2015-07-09T13:56:03.679-0400 I COMMAND [repl writer worker 6] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.682-0400 m31201| 2015-07-09T13:56:03.682-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.682-0400 m30999| 2015-07-09T13:56:03.682-0400 I COMMAND [conn44] DROP: db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.682-0400 m30999| 2015-07-09T13:56:03.682-0400 I COMMAND [conn44] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.683-0400 m31100| 2015-07-09T13:56:03.682-0400 I COMMAND [conn59] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.683-0400 m31202| 2015-07-09T13:56:03.683-0400 I COMMAND [repl writer worker 12] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.686-0400 
m31102| 2015-07-09T13:56:03.686-0400 I COMMAND [repl writer worker 5] CMD: drop db6.map_reduce_replace_nonexistent3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.690-0400 m31101| 2015-07-09T13:56:03.689-0400 I COMMAND [repl writer worker 13] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.692-0400 m30999| 2015-07-09T13:56:03.691-0400 I NETWORK [conn44] end connection 127.0.0.1:62761 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.693-0400 m31100| 2015-07-09T13:56:03.693-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.695-0400 m31101| 2015-07-09T13:56:03.694-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.697-0400 m31100| 2015-07-09T13:56:03.696-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_89 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.697-0400 m31100| 2015-07-09T13:56:03.696-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_89 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.698-0400 m31100| 2015-07-09T13:56:03.696-0400 I COMMAND [conn58] CMD: drop db6.tmp.mr.coll6_89 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.698-0400 m31100| 2015-07-09T13:56:03.697-0400 I COMMAND [conn32] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.698-0400 m31102| 2015-07-09T13:56:03.698-0400 I COMMAND [repl writer worker 7] CMD: drop db6.tmp.mrs.coll6_1436464563_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.702-0400 m31200| 2015-07-09T13:56:03.702-0400 I COMMAND [conn47] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.703-0400 m31102| 2015-07-09T13:56:03.703-0400 I COMMAND [repl writer worker 8] CMD: drop db6.map_reduce_replace_nonexistent4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.705-0400 m30998| 2015-07-09T13:56:03.704-0400 I COMMAND [conn43] DROP: db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.705-0400 m30998| 2015-07-09T13:56:03.704-0400 I COMMAND [conn43] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.705-0400 m31100| 2015-07-09T13:56:03.704-0400 I COMMAND [conn58] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.707-0400 m31202| 2015-07-09T13:56:03.706-0400 I COMMAND [repl writer worker 8] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.708-0400 m31201| 2015-07-09T13:56:03.707-0400 I COMMAND [repl writer worker 2] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.713-0400 m31101| 2015-07-09T13:56:03.711-0400 I COMMAND [repl writer worker 6] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.715-0400 m31101| 2015-07-09T13:56:03.715-0400 I COMMAND [repl writer worker 4] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.716-0400 m31102| 2015-07-09T13:56:03.715-0400 I COMMAND [repl writer worker 5] CMD: drop db6.tmp.mrs.coll6_1436464563_17 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.718-0400 m30998| 2015-07-09T13:56:03.717-0400 I NETWORK [conn43] end connection 127.0.0.1:62760 (1 connection now open) 
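Every drop issued through a mongos in this stretch is a two-step affair: mongos logs "drop going to do passthrough" and forwards the command to the shard primary (m31100 or m31200), and the secondaries then replay it from the oplog, which is why each "CMD: drop" reappears tagged [repl writer worker N] on m31101/m31102 and m31201/m31202. A hedged way to verify that on a secondary, assuming a direct shell connection to one of the replica-set members:

    // Drops replicate as command entries in the oplog; on a 3.x
    // secondary, reads need slaveOk first.
    rs.slaveOk();
    db.getSiblingDB("local").oplog.rs
      .find({ op: "c", "o.drop": "map_reduce_replace_nonexistent1" })
      .sort({ $natural: -1 })
      .limit(1);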
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.719-0400 m31102| 2015-07-09T13:56:03.718-0400 I COMMAND [repl writer worker 0] CMD: drop db6.map_reduce_replace_nonexistent1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.737-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.737-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.737-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.737-0400 jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js: Workload completed in 7311 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.737-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.737-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.737-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.738-0400 m30999| 2015-07-09T13:56:03.737-0400 I COMMAND [conn1] DROP: db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.738-0400 m30999| 2015-07-09T13:56:03.738-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:03.738-0400-559eb5b3ca4787b9985d1bcd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464563738), what: "dropCollection.start", ns: "db6.coll6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.795-0400 m30999| 2015-07-09T13:56:03.794-0400 I SHARDING [conn1] distributed lock 'db6.coll6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b3ca4787b9985d1bce [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.796-0400 m31100| 2015-07-09T13:56:03.795-0400 I COMMAND [conn15] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.798-0400 m31200| 2015-07-09T13:56:03.797-0400 I COMMAND [conn48] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.799-0400 m31101| 2015-07-09T13:56:03.799-0400 I COMMAND [repl writer worker 12] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.800-0400 m31102| 2015-07-09T13:56:03.799-0400 I COMMAND [repl writer worker 6] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.801-0400 m31202| 2015-07-09T13:56:03.801-0400 I COMMAND [repl writer worker 9] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.801-0400 m31201| 2015-07-09T13:56:03.801-0400 I COMMAND [repl writer worker 1] CMD: drop db6.coll6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.854-0400 m31100| 2015-07-09T13:56:03.853-0400 I SHARDING [conn15] remotely refreshing metadata for db6.coll6 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5abca4787b9985d1bcb, current metadata version is 2|3||559eb5abca4787b9985d1bcb [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.855-0400 m31100| 2015-07-09T13:56:03.855-0400 W SHARDING [conn15] no chunks found when reloading db6.coll6, previous version was 0|0||559eb5abca4787b9985d1bcb, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.856-0400 m31100| 2015-07-09T13:56:03.855-0400 I SHARDING [conn15] dropping metadata for db6.coll6 at shard version 2|3||559eb5abca4787b9985d1bcb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.857-0400 m31200| 2015-07-09T13:56:03.856-0400 I SHARDING [conn48] remotely refreshing metadata for db6.coll6 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5abca4787b9985d1bcb, current metadata version is 2|5||559eb5abca4787b9985d1bcb 
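The "about to log metadata event" lines are mongos writing to the config.changelog collection: a dropCollection.start entry when the distributed lock on db6.coll6 is acquired, and a matching dropCollection entry once both shards have dropped their data and chunk metadata. Those events can be read back later; a sketch, assuming a shell connected to the mongos:

    // The changelog documents mirror the "what"/"ns"/"details"
    // fields visible in the metadata-event log lines above.
    db.getSiblingDB("config").changelog
      .find({ ns: "db6.coll6",
              what: { $in: ["dropCollection.start", "dropCollection"] } })
      .sort({ time: 1 });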
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.858-0400 m31200| 2015-07-09T13:56:03.858-0400 W SHARDING [conn48] no chunks found when reloading db6.coll6, previous version was 0|0||559eb5abca4787b9985d1bcb, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.858-0400 m31200| 2015-07-09T13:56:03.858-0400 I SHARDING [conn48] dropping metadata for db6.coll6 at shard version 2|5||559eb5abca4787b9985d1bcb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.859-0400 m30999| 2015-07-09T13:56:03.859-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:03.859-0400-559eb5b3ca4787b9985d1bcf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464563859), what: "dropCollection", ns: "db6.coll6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.914-0400 m30999| 2015-07-09T13:56:03.913-0400 I SHARDING [conn1] distributed lock 'db6.coll6/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.970-0400 m30999| 2015-07-09T13:56:03.969-0400 I COMMAND [conn1] DROP DATABASE: db6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.970-0400 m30999| 2015-07-09T13:56:03.970-0400 I SHARDING [conn1] DBConfig::dropDatabase: db6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:03.970-0400 m30999| 2015-07-09T13:56:03.970-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:03.970-0400-559eb5b3ca4787b9985d1bd0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464563970), what: "dropDatabase.start", ns: "db6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.076-0400 m30999| 2015-07-09T13:56:04.075-0400 I SHARDING [conn1] DBConfig::dropDatabase: db6 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.076-0400 m31100| 2015-07-09T13:56:04.076-0400 I COMMAND [conn28] dropDatabase db6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.077-0400 m31100| 2015-07-09T13:56:04.076-0400 I COMMAND [conn28] dropDatabase db6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.077-0400 m30999| 2015-07-09T13:56:04.076-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.076-0400-559eb5b4ca4787b9985d1bd1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464564076), what: "dropDatabase", ns: "db6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.077-0400 m31101| 2015-07-09T13:56:04.077-0400 I COMMAND [repl writer worker 2] dropDatabase db6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.077-0400 m31101| 2015-07-09T13:56:04.077-0400 I COMMAND [repl writer worker 2] dropDatabase db6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.078-0400 m31102| 2015-07-09T13:56:04.077-0400 I COMMAND [repl writer worker 1] dropDatabase db6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.078-0400 m31102| 2015-07-09T13:56:04.077-0400 I COMMAND [repl writer worker 1] dropDatabase db6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.168-0400 m31100| 2015-07-09T13:56:04.168-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.169-0400 m31102| 2015-07-09T13:56:04.169-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.171-0400 m31101| 
2015-07-09T13:56:04.171-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.203-0400 m31200| 2015-07-09T13:56:04.203-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.207-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.207-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.207-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.207-0400 jstests/concurrency/fsm_workloads/indexed_insert_large.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.734-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.735-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.735-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.735-0400 m31201| 2015-07-09T13:56:04.207-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.735-0400 m31202| 2015-07-09T13:56:04.208-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.735-0400 m30999| 2015-07-09T13:56:04.213-0400 I SHARDING [conn1] distributed lock 'db7/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b4ca4787b9985d1bd2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.735-0400 m30999| 2015-07-09T13:56:04.217-0400 I SHARDING [conn1] Placing [db7] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.736-0400 m30999| 2015-07-09T13:56:04.217-0400 I SHARDING [conn1] Enabling sharding for database [db7] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.736-0400 m30999| 2015-07-09T13:56:04.272-0400 I SHARDING [conn1] distributed lock 'db7/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.736-0400 m31100| 2015-07-09T13:56:04.310-0400 I INDEX [conn29] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.736-0400 m31100| 2015-07-09T13:56:04.310-0400 I INDEX [conn29] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.736-0400 m31100| 2015-07-09T13:56:04.323-0400 I INDEX [conn29] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.736-0400 m30999| 2015-07-09T13:56:04.325-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db7.coll7", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.737-0400 m30999| 2015-07-09T13:56:04.328-0400 I SHARDING [conn1] distributed lock 'db7.coll7/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b4ca4787b9985d1bd3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.737-0400 m30999| 2015-07-09T13:56:04.328-0400 I SHARDING [conn1] enable sharding on: db7.coll7 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.737-0400 m30999| 2015-07-09T13:56:04.329-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.329-0400-559eb5b4ca4787b9985d1bd4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464564329), what: "shardCollection.start", ns: "db7.coll7", details: { shardKey: { _id: "hashed" }, collection: "db7.coll7", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.737-0400 m31102| 2015-07-09T13:56:04.332-0400 I INDEX [repl writer worker 15] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.737-0400 m31102| 2015-07-09T13:56:04.333-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.738-0400 m31101| 2015-07-09T13:56:04.338-0400 I INDEX [repl writer worker 15] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.738-0400 m31101| 2015-07-09T13:56:04.338-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.738-0400 m31102| 2015-07-09T13:56:04.339-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.738-0400 m31101| 2015-07-09T13:56:04.346-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.738-0400 m30999| 2015-07-09T13:56:04.382-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db7.coll7 using new epoch 559eb5b4ca4787b9985d1bd5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.738-0400 m30999| 2015-07-09T13:56:04.469-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:04.463-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.738-0400 m30999| 2015-07-09T13:56:04.470-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 35 version: 1|1||559eb5b4ca4787b9985d1bd5 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.739-0400 m30999| 2015-07-09T13:56:04.525-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 36 version: 1|1||559eb5b4ca4787b9985d1bd5 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.739-0400 m31100| 2015-07-09T13:56:04.527-0400 I SHARDING [conn59] remotely refreshing metadata for db7.coll7 with requested shard version 1|1||559eb5b4ca4787b9985d1bd5, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.739-0400 m31100| 2015-07-09T13:56:04.528-0400 I SHARDING [conn59] collection db7.coll7 was previously unsharded, new metadata loaded with shard version 1|1||559eb5b4ca4787b9985d1bd5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.739-0400 m31100| 2015-07-09T13:56:04.528-0400 I SHARDING [conn59] collection version was loaded at version 1|1||559eb5b4ca4787b9985d1bd5, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.740-0400 m30999| 2015-07-09T13:56:04.529-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.529-0400-559eb5b4ca4787b9985d1bd6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464564529), what: "shardCollection", ns: "db7.coll7", details: { version: "1|1||559eb5b4ca4787b9985d1bd5" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.740-0400 m30999| 2015-07-09T13:56:04.583-0400 I SHARDING [conn1] distributed lock 'db7.coll7/bs-osx108-8:30999:1436464534:16807' unlocked. 
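Between workloads the harness provisions a fresh database for the next test: db7 is placed on test-rs0, sharding is enabled for it, and db7.coll7 is hash-sharded on _id, which pre-creates the two chunks the log mentions; the moveChunk sequence that follows rebalances one of them onto test-rs1. Reconstructed from the mongos log as a sketch of the equivalent shell commands:

    // Names come from the log; run against the mongos (m30999).
    db.adminCommand({ enableSharding: "db7" });
    db.adminCommand({ shardCollection: "db7.coll7", key: { _id: "hashed" } });
    // Move the upper chunk, matching the min/max in the moveChunk
    // request that follows; bounds addresses a chunk of a hashed key.
    db.adminCommand({
        moveChunk: "db7.coll7",
        bounds: [ { _id: 0 }, { _id: MaxKey } ],
        to: "test-rs1"
    });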
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.740-0400 m30999| 2015-07-09T13:56:04.584-0400 I SHARDING [conn1] moving chunk ns: db7.coll7 moving ( ns: db7.coll7, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.740-0400 m31100| 2015-07-09T13:56:04.584-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.740-0400 m31100| 2015-07-09T13:56:04.585-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5b4ca4787b9985d1bd5') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.741-0400 m31100| 2015-07-09T13:56:04.589-0400 I SHARDING [conn15] distributed lock 'db7.coll7/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5b4792e00bb67274903
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.741-0400 m31100| 2015-07-09T13:56:04.589-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.589-0400-559eb5b4792e00bb67274904", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464564589), what: "moveChunk.start", ns: "db7.coll7", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.741-0400 m31100| 2015-07-09T13:56:04.643-0400 I SHARDING [conn15] remotely refreshing metadata for db7.coll7 based on current shard version 1|1||559eb5b4ca4787b9985d1bd5, current metadata version is 1|1||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.741-0400 m31100| 2015-07-09T13:56:04.644-0400 I SHARDING [conn15] metadata of collection db7.coll7 already up to date (shard version : 1|1||559eb5b4ca4787b9985d1bd5, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.741-0400 m31100| 2015-07-09T13:56:04.644-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.741-0400 m31100| 2015-07-09T13:56:04.645-0400 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.742-0400 m31200| 2015-07-09T13:56:04.645-0400 I SHARDING [conn16] remotely refreshing metadata for db7.coll7, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.742-0400 m31200| 2015-07-09T13:56:04.647-0400 I SHARDING [conn16] collection db7.coll7 was previously unsharded, new metadata loaded with shard version 0|0||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.742-0400 m31200| 2015-07-09T13:56:04.647-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb5b4ca4787b9985d1bd5, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.742-0400 m31200| 2015-07-09T13:56:04.647-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db7.coll7 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.742-0400 m31100| 2015-07-09T13:56:04.649-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.743-0400 m31100| 2015-07-09T13:56:04.653-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.743-0400 m31100| 2015-07-09T13:56:04.658-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.743-0400 m31200| 2015-07-09T13:56:04.665-0400 I INDEX [migrateThread] build index on: db7.coll7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.743-0400 m31200| 2015-07-09T13:56:04.665-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.744-0400 m31100| 2015-07-09T13:56:04.667-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.744-0400 m31200| 2015-07-09T13:56:04.677-0400 I INDEX [migrateThread] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.744-0400 m31200| 2015-07-09T13:56:04.677-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.744-0400 m31100| 2015-07-09T13:56:04.685-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.744-0400 m31200| 2015-07-09T13:56:04.687-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.744-0400 m31200| 2015-07-09T13:56:04.687-0400 I SHARDING [migrateThread] Deleter starting delete for: db7.coll7 from { _id: 0 } -> { _id: MaxKey }, with opId: 4419
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.744-0400 m31200| 2015-07-09T13:56:04.688-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db7.coll7 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.745-0400 m31201| 2015-07-09T13:56:04.695-0400 I INDEX [repl writer worker 15] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.745-0400 m31201| 2015-07-09T13:56:04.695-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.745-0400 m31202| 2015-07-09T13:56:04.697-0400 I INDEX [repl writer worker 15] build index on: db7.coll7 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.745-0400 m31202| 2015-07-09T13:56:04.697-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.745-0400 m31201| 2015-07-09T13:56:04.702-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.745-0400 m31200| 2015-07-09T13:56:04.704-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.745-0400 m31200| 2015-07-09T13:56:04.705-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db7.coll7' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.746-0400 m31202| 2015-07-09T13:56:04.706-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.746-0400 m31100| 2015-07-09T13:56:04.717-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.746-0400 m31100| 2015-07-09T13:56:04.718-0400 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.746-0400 m31100| 2015-07-09T13:56:04.718-0400 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.746-0400 m31100| 2015-07-09T13:56:04.718-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.746-0400 m31200| 2015-07-09T13:56:04.727-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db7.coll7' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.747-0400 m31200| 2015-07-09T13:56:04.728-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.728-0400-559eb5b4d5a107a5b9c0da95", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464564728), what: "moveChunk.to", ns: "db7.coll7", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 40, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.782-0400 m31100| 2015-07-09T13:56:04.781-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.782-0400 m31100| 2015-07-09T13:56:04.781-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb5b4ca4787b9985d1bd5 through { _id: MinKey } -> { _id: 0 } for collection 'db7.coll7'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.783-0400 m31100| 2015-07-09T13:56:04.783-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.783-0400-559eb5b4792e00bb67274905", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464564783), what: "moveChunk.commit", ns: "db7.coll7", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.837-0400 m31100| 2015-07-09T13:56:04.836-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.837-0400 m31100| 2015-07-09T13:56:04.836-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.837-0400 m31100| 2015-07-09T13:56:04.836-0400 I SHARDING [conn15] Deleter starting delete for: db7.coll7 from { _id: 0 } -> { _id: MaxKey }, with opId: 7503
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.837-0400 m31100| 2015-07-09T13:56:04.836-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db7.coll7 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.837-0400 m31100| 2015-07-09T13:56:04.836-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.837-0400 m31100| 2015-07-09T13:56:04.837-0400 I SHARDING [conn15] distributed lock 'db7.coll7/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.838-0400 m31100| 2015-07-09T13:56:04.837-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.837-0400-559eb5b4792e00bb67274906", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464564837), what: "moveChunk.from", ns: "db7.coll7", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 70, step 5 of 6: 118, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.891-0400 m31100| 2015-07-09T13:56:04.890-0400 I COMMAND [conn15] command db7.coll7 command: moveChunk { moveChunk: "db7.coll7", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5b4ca4787b9985d1bd5') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 305ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.893-0400 m30999| 2015-07-09T13:56:04.892-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 37 version: 2|1||559eb5b4ca4787b9985d1bd5 based on: 1|1||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.894-0400 m31100| 2015-07-09T13:56:04.893-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db7.coll7", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5b4ca4787b9985d1bd5') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.898-0400 m31100| 2015-07-09T13:56:04.898-0400 I SHARDING [conn15] distributed lock 'db7.coll7/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5b4792e00bb67274907
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.898-0400 m31100| 2015-07-09T13:56:04.898-0400 I SHARDING [conn15] remotely refreshing metadata for db7.coll7 based on current shard version 2|0||559eb5b4ca4787b9985d1bd5, current metadata version is 2|0||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.899-0400 m31100| 2015-07-09T13:56:04.899-0400 I SHARDING [conn15] updating metadata for db7.coll7 from shard version 2|0||559eb5b4ca4787b9985d1bd5 to shard version 2|1||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.900-0400 m31100| 2015-07-09T13:56:04.899-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb5b4ca4787b9985d1bd5, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.900-0400 m31100| 2015-07-09T13:56:04.899-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.901-0400 m31100| 2015-07-09T13:56:04.900-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.900-0400-559eb5b4792e00bb67274908", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464564900), what: "split", ns: "db7.coll7", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5b4ca4787b9985d1bd5') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5b4ca4787b9985d1bd5') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.955-0400 m31100| 2015-07-09T13:56:04.954-0400 I SHARDING [conn15] distributed lock 'db7.coll7/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.957-0400 m30999| 2015-07-09T13:56:04.956-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 38 version: 2|3||559eb5b4ca4787b9985d1bd5 based on: 2|1||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.957-0400 m31200| 2015-07-09T13:56:04.957-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db7.coll7", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5b4ca4787b9985d1bd5') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.961-0400 m31200| 2015-07-09T13:56:04.960-0400 I SHARDING [conn48] distributed lock 'db7.coll7/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5b4d5a107a5b9c0da96
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.961-0400 m31200| 2015-07-09T13:56:04.960-0400 I SHARDING [conn48] remotely refreshing metadata for db7.coll7 based on current shard version 0|0||559eb5b4ca4787b9985d1bd5, current metadata version is 1|1||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.963-0400 m31200| 2015-07-09T13:56:04.962-0400 I SHARDING [conn48] updating metadata for db7.coll7 from shard version 0|0||559eb5b4ca4787b9985d1bd5 to shard version 2|0||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.963-0400 m31200| 2015-07-09T13:56:04.962-0400 I SHARDING [conn48] collection version was loaded at version 2|3||559eb5b4ca4787b9985d1bd5, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.963-0400 m31200| 2015-07-09T13:56:04.962-0400 I SHARDING [conn48] splitChunk accepted at version 2|0||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:04.964-0400 m31200| 2015-07-09T13:56:04.964-0400 I SHARDING [conn48] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:04.964-0400-559eb5b4d5a107a5b9c0da97", server: "bs-osx108-8", clientAddr: "127.0.0.1:62774", time: new Date(1436464564964), what: "split", ns: "db7.coll7", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5b4ca4787b9985d1bd5') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5b4ca4787b9985d1bd5') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.018-0400 m31200| 2015-07-09T13:56:05.018-0400 I SHARDING [conn48] distributed lock 'db7.coll7/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.020-0400 m30999| 2015-07-09T13:56:05.020-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 39 version: 2|5||559eb5b4ca4787b9985d1bd5 based on: 2|3||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.028-0400 m31100| 2015-07-09T13:56:05.027-0400 I INDEX [conn59] build index on: db7.coll7 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.028-0400 m31100| 2015-07-09T13:56:05.027-0400 I INDEX [conn59] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.029-0400 m31200| 2015-07-09T13:56:05.029-0400 I INDEX [conn40] build index on: db7.coll7 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.030-0400 m31200| 2015-07-09T13:56:05.029-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.031-0400 m31100| 2015-07-09T13:56:05.030-0400 I INDEX [conn59] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.035-0400 m31200| 2015-07-09T13:56:05.035-0400 I INDEX [conn40] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.036-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.128-0400 m31102| 2015-07-09T13:56:05.128-0400 I INDEX [repl writer worker 12] build index on: db7.coll7 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.128-0400 m31102| 2015-07-09T13:56:05.128-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.152-0400 m31201| 2015-07-09T13:56:05.151-0400 I INDEX [repl writer worker 13] build index on: db7.coll7 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.152-0400 m31201| 2015-07-09T13:56:05.151-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.154-0400 m31202| 2015-07-09T13:56:05.152-0400 I INDEX [repl writer worker 3] build index on: db7.coll7 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.154-0400 m31202| 2015-07-09T13:56:05.152-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.155-0400 m31101| 2015-07-09T13:56:05.153-0400 I INDEX [repl writer worker 5] build index on: db7.coll7 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db7.coll7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.155-0400 m31101| 2015-07-09T13:56:05.153-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.161-0400 m31102| 2015-07-09T13:56:05.161-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.168-0400 m31101| 2015-07-09T13:56:05.167-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.168-0400 m31202| 2015-07-09T13:56:05.167-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.180-0400 m31201| 2015-07-09T13:56:05.179-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.183-0400 m30999| 2015-07-09T13:56:05.180-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62777 #45 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.201-0400 m30998| 2015-07-09T13:56:05.200-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62778 #44 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.217-0400 m30998| 2015-07-09T13:56:05.213-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62779 #45 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.234-0400 m30999| 2015-07-09T13:56:05.233-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62780 #46 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.234-0400 m30998| 2015-07-09T13:56:05.234-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62781 #46 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.243-0400 m30999| 2015-07-09T13:56:05.242-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62782 #47 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.243-0400 m30998| 2015-07-09T13:56:05.243-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62783 #47 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.249-0400 m30999| 2015-07-09T13:56:05.243-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62785 #48 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.253-0400 m30998| 2015-07-09T13:56:05.253-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62784 #48 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.255-0400 m30998| 2015-07-09T13:56:05.254-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62786 #49 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.280-0400 m30999| 2015-07-09T13:56:05.280-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62787 #49 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.280-0400 m30998| 2015-07-09T13:56:05.280-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62788 #50 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.281-0400 m30998| 2015-07-09T13:56:05.280-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62789 #51 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.285-0400 m30998| 2015-07-09T13:56:05.281-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62790 #52 (10 connections now open)
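The moveChunk/splitChunk sequence above is the standard post-shardCollection setup for a hashed shard key: one of the two initial chunks is moved to the second shard, then each chunk is split at the midpoint of its half of the hashed-key space (+/-4611686018427387902, i.e. +/-2^62). A minimal mongo-shell sketch of the same sequence, assuming a connection to one of the mongos routers; the command names (moveChunk, split, _waitForDelete) are real admin commands, and the values are copied from the log above rather than invented:

    // Move the upper chunk of db7.coll7 to the second shard; bounds routing is
    // used because the shard key is hashed, and _waitForDelete mirrors the
    // "waitForDelete: true" seen in the logged moveChunk request.
    db.adminCommand({
        moveChunk: "db7.coll7",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
        to: "test-rs1",
        _waitForDelete: true
    });
    // Split each remaining chunk at the midpoint of its hashed-key range (2^62),
    // matching the splitKeys in the two splitChunk requests above.
    db.adminCommand({ split: "db7.coll7", middle: { _id: NumberLong("-4611686018427387902") } });
    db.adminCommand({ split: "db7.coll7", middle: { _id: NumberLong("4611686018427387902") } });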
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.290-0400 m30999| 2015-07-09T13:56:05.290-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62791 #50 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.296-0400 m30999| 2015-07-09T13:56:05.291-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62792 #51 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.296-0400 m30998| 2015-07-09T13:56:05.291-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62793 #53 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.296-0400 m30999| 2015-07-09T13:56:05.293-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62794 #52 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.296-0400 m30999| 2015-07-09T13:56:05.293-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62795 #53 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.297-0400 m30999| 2015-07-09T13:56:05.297-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62796 #54 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.305-0400 setting random seed: 1178047540597
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.305-0400 setting random seed: 8014105744659
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.305-0400 setting random seed: 6789293601177
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.311-0400 setting random seed: 2185712410137
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.311-0400 setting random seed: 2798821753822
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.311-0400 setting random seed: 1789896995760
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.311-0400 setting random seed: 7657608124427
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.312-0400 setting random seed: 7781813186593
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.314-0400 setting random seed: 9445024039596
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.314-0400 setting random seed: 8638333692215
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.315-0400 setting random seed: 8578043552115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.316-0400 setting random seed: 8934287973679
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.324-0400 setting random seed: 9797238390892
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.325-0400 setting random seed: 2519107414409
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.325-0400 setting random seed: 3075313218869
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.326-0400 m30998| 2015-07-09T13:56:05.322-0400 I SHARDING [conn49] ChunkManager: time to load chunks for db7.coll7: 0ms sequenceNumber: 9 version: 2|5||559eb5b4ca4787b9985d1bd5 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.326-0400 setting random seed: 7578866784460
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.326-0400 setting random seed: 4474432668648
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.334-0400 setting random seed: 5268494277261
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.334-0400 setting random seed: 7583359158597
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.335-0400 setting random seed: 3517437903210
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.409-0400 m31100| 2015-07-09T13:56:05.408-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62797 #66 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.420-0400 m31100| 2015-07-09T13:56:05.418-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62798 #67 (61 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.432-0400 m31200| 2015-07-09T13:56:05.431-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62799 #49 (45 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.441-0400 m31200| 2015-07-09T13:56:05.441-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62800 #50 (46 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.469-0400 m31200| 2015-07-09T13:56:05.468-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62801 #51 (47 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.507-0400 m31100| 2015-07-09T13:56:05.507-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62802 #68 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.518-0400 m31100| 2015-07-09T13:56:05.517-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62803 #69 (63 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.528-0400 m31100| 2015-07-09T13:56:05.527-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62804 #70 (64 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.664-0400 m31200| 2015-07-09T13:56:05.663-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62805 #52 (48 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.703-0400 m31200| 2015-07-09T13:56:05.700-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62806 #53 (49 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.763-0400 m30999| 2015-07-09T13:56:05.763-0400 I NETWORK [conn45] end connection 127.0.0.1:62777 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.822-0400 m31100| 2015-07-09T13:56:05.822-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62807 #71 (65 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.823-0400 m30999| 2015-07-09T13:56:05.822-0400 I NETWORK [conn48] end connection 127.0.0.1:62785 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.831-0400 m31100| 2015-07-09T13:56:05.831-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62808 #72 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.841-0400 m31100| 2015-07-09T13:56:05.840-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62809 #73 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.851-0400 m30998| 2015-07-09T13:56:05.851-0400 I NETWORK [conn45] end connection 127.0.0.1:62779 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.861-0400 m30998| 2015-07-09T13:56:05.860-0400 I NETWORK [conn47] end connection 127.0.0.1:62783 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.868-0400 m30999| 2015-07-09T13:56:05.868-0400 I NETWORK [conn47] end connection 127.0.0.1:62782 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.878-0400 m30998| 2015-07-09T13:56:05.877-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:05.876-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.912-0400 m30998| 2015-07-09T13:56:05.911-0400 I NETWORK [conn53] end connection 127.0.0.1:62793 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.921-0400 m30999| 2015-07-09T13:56:05.918-0400 I NETWORK [conn46] end connection 127.0.0.1:62780 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.921-0400 m30998| 2015-07-09T13:56:05.920-0400 I NETWORK [conn48] end connection 127.0.0.1:62784 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.936-0400 m30999| 2015-07-09T13:56:05.936-0400 I NETWORK [conn50] end connection 127.0.0.1:62791 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.944-0400 m30998| 2015-07-09T13:56:05.944-0400 I NETWORK [conn50] end connection 127.0.0.1:62788 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.949-0400 m30999| 2015-07-09T13:56:05.948-0400 I NETWORK [conn49] end connection 127.0.0.1:62787 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.961-0400 m30999| 2015-07-09T13:56:05.960-0400 I NETWORK [conn54] end connection 127.0.0.1:62796 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.967-0400 m30998| 2015-07-09T13:56:05.967-0400 I NETWORK [conn44] end connection 127.0.0.1:62778 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:05.998-0400 m30998| 2015-07-09T13:56:05.995-0400 I NETWORK [conn46] end connection 127.0.0.1:62781 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.029-0400 m30998| 2015-07-09T13:56:06.027-0400 I NETWORK [conn49] end connection 127.0.0.1:62786 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.039-0400 m30998| 2015-07-09T13:56:06.035-0400 I NETWORK [conn51] end connection 127.0.0.1:62789 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.043-0400 m30999| 2015-07-09T13:56:06.043-0400 I NETWORK [conn51] end connection 127.0.0.1:62792 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.044-0400 m30999| 2015-07-09T13:56:06.044-0400 I NETWORK [conn52] end connection 127.0.0.1:62794 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.090-0400 m30999| 2015-07-09T13:56:06.090-0400 I NETWORK [conn53] end connection 127.0.0.1:62795 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.098-0400 m30998| 2015-07-09T13:56:06.098-0400 I NETWORK [conn52] end connection 127.0.0.1:62790 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.120-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.120-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.121-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.121-0400 jstests/concurrency/fsm_workloads/indexed_insert_large.js: Workload completed in 1084 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.121-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.121-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.121-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.121-0400 m30999| 2015-07-09T13:56:06.121-0400 I COMMAND [conn1] DROP: db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.121-0400 m30999| 2015-07-09T13:56:06.121-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:06.121-0400-559eb5b6ca4787b9985d1bd7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464566121), what: "dropCollection.start", ns: "db7.coll7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.178-0400 m30999| 2015-07-09T13:56:06.177-0400 I SHARDING [conn1] distributed lock 'db7.coll7/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b6ca4787b9985d1bd8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.179-0400 m31100| 2015-07-09T13:56:06.179-0400 I COMMAND [conn15] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.182-0400 m31200| 2015-07-09T13:56:06.181-0400 I COMMAND [conn48] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.183-0400 m31102| 2015-07-09T13:56:06.183-0400 I COMMAND [repl writer worker 0] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.183-0400 m31101| 2015-07-09T13:56:06.183-0400 I COMMAND [repl writer worker 11] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.185-0400 m31202| 2015-07-09T13:56:06.185-0400 I COMMAND [repl writer worker 3] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.186-0400 m31201| 2015-07-09T13:56:06.185-0400 I COMMAND [repl writer worker 2] CMD: drop db7.coll7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.238-0400 m31100| 2015-07-09T13:56:06.238-0400 I SHARDING [conn15] remotely refreshing metadata for db7.coll7 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5b4ca4787b9985d1bd5, current metadata version is 2|3||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.240-0400 m31100| 2015-07-09T13:56:06.239-0400 W SHARDING [conn15] no chunks found when reloading db7.coll7, previous version was 0|0||559eb5b4ca4787b9985d1bd5, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.240-0400 m31100| 2015-07-09T13:56:06.239-0400 I SHARDING [conn15] dropping metadata for db7.coll7 at shard version 2|3||559eb5b4ca4787b9985d1bd5, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.241-0400 m31200| 2015-07-09T13:56:06.240-0400 I SHARDING [conn48] remotely refreshing metadata for db7.coll7 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5b4ca4787b9985d1bd5, current metadata version is 2|5||559eb5b4ca4787b9985d1bd5
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.243-0400 m31200| 2015-07-09T13:56:06.242-0400 W SHARDING [conn48] no chunks found when reloading db7.coll7, previous version was 0|0||559eb5b4ca4787b9985d1bd5, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.243-0400 m31200| 2015-07-09T13:56:06.242-0400 I SHARDING [conn48] dropping metadata for db7.coll7 at shard version 2|5||559eb5b4ca4787b9985d1bd5, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.244-0400 m30999| 2015-07-09T13:56:06.243-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:06.243-0400-559eb5b6ca4787b9985d1bd9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464566243), what: "dropCollection", ns: "db7.coll7", details: {} }
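Between workloads the harness tears down the test database through a mongos, which is what produces the dropCollection cascade above and the dropDatabase cascade that follows: mongos takes the distributed lock, sends the drop to each shard's primary, the secondaries replay it, and the sharding metadata is removed from the config servers. A sketch of the equivalent shell steps, assuming a mongos connection (not the harness's actual teardown code):

    // Drop the sharded collection first (this removes its chunks and metadata
    // from the config servers), then drop the database itself.
    db.getSiblingDB("db7").coll7.drop();
    db.getSiblingDB("db7").dropDatabase();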
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.304-0400 m30999| 2015-07-09T13:56:06.304-0400 I SHARDING [conn1] distributed lock 'db7.coll7/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.365-0400 m30999| 2015-07-09T13:56:06.364-0400 I COMMAND [conn1] DROP DATABASE: db7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.365-0400 m30999| 2015-07-09T13:56:06.365-0400 I SHARDING [conn1] DBConfig::dropDatabase: db7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.365-0400 m30999| 2015-07-09T13:56:06.365-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:06.365-0400-559eb5b6ca4787b9985d1bda", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464566365), what: "dropDatabase.start", ns: "db7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.471-0400 m30999| 2015-07-09T13:56:06.470-0400 I SHARDING [conn1] DBConfig::dropDatabase: db7 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.471-0400 m31100| 2015-07-09T13:56:06.471-0400 I COMMAND [conn28] dropDatabase db7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.472-0400 m31100| 2015-07-09T13:56:06.471-0400 I COMMAND [conn28] dropDatabase db7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.472-0400 m30999| 2015-07-09T13:56:06.471-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:06.471-0400-559eb5b6ca4787b9985d1bdb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464566471), what: "dropDatabase", ns: "db7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.472-0400 m31101| 2015-07-09T13:56:06.472-0400 I COMMAND [repl writer worker 4] dropDatabase db7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.473-0400 m31101| 2015-07-09T13:56:06.472-0400 I COMMAND [repl writer worker 4] dropDatabase db7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.473-0400 m31102| 2015-07-09T13:56:06.472-0400 I COMMAND [repl writer worker 7] dropDatabase db7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.473-0400 m31102| 2015-07-09T13:56:06.472-0400 I COMMAND [repl writer worker 7] dropDatabase db7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.554-0400 m31100| 2015-07-09T13:56:06.553-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.557-0400 m31102| 2015-07-09T13:56:06.557-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.557-0400 m31101| 2015-07-09T13:56:06.557-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.588-0400 m31200| 2015-07-09T13:56:06.588-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.591-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.591-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.591-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.591-0400 jstests/concurrency/fsm_workloads/update_inc.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.591-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.592-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.592-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.592-0400 m31201| 2015-07-09T13:56:06.591-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.592-0400 m31202| 2015-07-09T13:56:06.592-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.599-0400 m30999| 2015-07-09T13:56:06.598-0400 I SHARDING [conn1] distributed lock 'db8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b6ca4787b9985d1bdc
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.602-0400 m30999| 2015-07-09T13:56:06.602-0400 I SHARDING [conn1] Placing [db8] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.602-0400 m30999| 2015-07-09T13:56:06.602-0400 I SHARDING [conn1] Enabling sharding for database [db8] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.657-0400 m30999| 2015-07-09T13:56:06.657-0400 I SHARDING [conn1] distributed lock 'db8/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.678-0400 m31100| 2015-07-09T13:56:06.677-0400 I INDEX [conn23] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.678-0400 m31100| 2015-07-09T13:56:06.677-0400 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.687-0400 m31100| 2015-07-09T13:56:06.686-0400 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.688-0400 m30999| 2015-07-09T13:56:06.688-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db8.coll8", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.691-0400 m30999| 2015-07-09T13:56:06.691-0400 I SHARDING [conn1] distributed lock 'db8.coll8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b6ca4787b9985d1bdd
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.692-0400 m30999| 2015-07-09T13:56:06.691-0400 I SHARDING [conn1] enable sharding on: db8.coll8 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.693-0400 m30999| 2015-07-09T13:56:06.692-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:06.692-0400-559eb5b6ca4787b9985d1bde", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464566692), what: "shardCollection.start", ns: "db8.coll8", details: { shardKey: { _id: "hashed" }, collection: "db8.coll8", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.698-0400 m31101| 2015-07-09T13:56:06.697-0400 I INDEX [repl writer worker 2] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.698-0400 m31101| 2015-07-09T13:56:06.697-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.700-0400 m31102| 2015-07-09T13:56:06.699-0400 I INDEX [repl writer worker 13] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.700-0400 m31102| 2015-07-09T13:56:06.699-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.703-0400 m31101| 2015-07-09T13:56:06.702-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.705-0400 m31102| 2015-07-09T13:56:06.705-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.745-0400 m30999| 2015-07-09T13:56:06.745-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db8.coll8 using new epoch 559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.794-0400 m31100| 2015-07-09T13:56:06.794-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:06.787-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.847-0400 m30999| 2015-07-09T13:56:06.846-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 40 version: 1|1||559eb5b6ca4787b9985d1bdf based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.902-0400 m30999| 2015-07-09T13:56:06.901-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 41 version: 1|1||559eb5b6ca4787b9985d1bdf based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.903-0400 m31100| 2015-07-09T13:56:06.903-0400 I SHARDING [conn20] remotely refreshing metadata for db8.coll8 with requested shard version 1|1||559eb5b6ca4787b9985d1bdf, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.905-0400 m31100| 2015-07-09T13:56:06.904-0400 I SHARDING [conn20] collection db8.coll8 was previously unsharded, new metadata loaded with shard version 1|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.905-0400 m31100| 2015-07-09T13:56:06.904-0400 I SHARDING [conn20] collection version was loaded at version 1|1||559eb5b6ca4787b9985d1bdf, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.905-0400 m30999| 2015-07-09T13:56:06.905-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:06.905-0400-559eb5b6ca4787b9985d1be0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464566905), what: "shardCollection", ns: "db8.coll8", details: { version: "1|1||559eb5b6ca4787b9985d1bdf" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.960-0400 m30999| 2015-07-09T13:56:06.959-0400 I SHARDING [conn1] distributed lock 'db8.coll8/bs-osx108-8:30999:1436464534:16807' unlocked.
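The same setup now repeats for the update_inc workload: db8 is placed on test-rs0, sharding is enabled for the database, and db8.coll8 is sharded on a hashed _id, which yields the two initial chunks logged above. A hedged sketch of the equivalent shell commands, assuming a mongos connection; both are real admin commands and the names come from the log:

    // Enable sharding on the database, then shard the collection on a hashed
    // _id; the supporting { _id: "hashed" } index is built on the primary
    // shard (m31100, conn23 above) and replicated to its secondaries.
    db.adminCommand({ enableSharding: "db8" });
    db.adminCommand({ shardCollection: "db8.coll8", key: { _id: "hashed" } });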
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.961-0400 m30999| 2015-07-09T13:56:06.960-0400 I SHARDING [conn1] moving chunk ns: db8.coll8 moving ( ns: db8.coll8, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.961-0400 m31100| 2015-07-09T13:56:06.961-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.962-0400 m31100| 2015-07-09T13:56:06.962-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5b6ca4787b9985d1bdf') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.965-0400 m31100| 2015-07-09T13:56:06.965-0400 I SHARDING [conn15] distributed lock 'db8.coll8/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5b6792e00bb6727490a
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:06.966-0400 m31100| 2015-07-09T13:56:06.965-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:06.965-0400-559eb5b6792e00bb6727490b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464566965), what: "moveChunk.start", ns: "db8.coll8", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.019-0400 m31100| 2015-07-09T13:56:07.018-0400 I SHARDING [conn15] remotely refreshing metadata for db8.coll8 based on current shard version 1|1||559eb5b6ca4787b9985d1bdf, current metadata version is 1|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.020-0400 m31100| 2015-07-09T13:56:07.020-0400 I SHARDING [conn15] metadata of collection db8.coll8 already up to date (shard version : 1|1||559eb5b6ca4787b9985d1bdf, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.020-0400 m31100| 2015-07-09T13:56:07.020-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.022-0400 m31100| 2015-07-09T13:56:07.021-0400 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.022-0400 m31200| 2015-07-09T13:56:07.022-0400 I SHARDING [conn16] remotely refreshing metadata for db8.coll8, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.024-0400 m31200| 2015-07-09T13:56:07.023-0400 I SHARDING [conn16] collection db8.coll8 was previously unsharded, new metadata loaded with shard version 0|0||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.024-0400 m31200| 2015-07-09T13:56:07.023-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb5b6ca4787b9985d1bdf, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.024-0400 m31200| 2015-07-09T13:56:07.024-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db8.coll8 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.027-0400 m31100| 2015-07-09T13:56:07.026-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.030-0400 m31100| 2015-07-09T13:56:07.029-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.035-0400 m31100| 2015-07-09T13:56:07.034-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.038-0400 m31200| 2015-07-09T13:56:07.038-0400 I INDEX [migrateThread] build index on: db8.coll8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.039-0400 m31200| 2015-07-09T13:56:07.038-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.044-0400 m31100| 2015-07-09T13:56:07.044-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.045-0400 m31200| 2015-07-09T13:56:07.045-0400 I INDEX [migrateThread] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.046-0400 m31200| 2015-07-09T13:56:07.045-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.062-0400 m31100| 2015-07-09T13:56:07.062-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.063-0400 m31200| 2015-07-09T13:56:07.062-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.063-0400 m31200| 2015-07-09T13:56:07.063-0400 I SHARDING [migrateThread] Deleter starting delete for: db8.coll8 from { _id: 0 } -> { _id: MaxKey }, with opId: 5980
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.064-0400 m31200| 2015-07-09T13:56:07.064-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db8.coll8 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.071-0400 m31201| 2015-07-09T13:56:07.070-0400 I INDEX [repl writer worker 3] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.071-0400 m31201| 2015-07-09T13:56:07.070-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.074-0400 m31202| 2015-07-09T13:56:07.073-0400 I INDEX [repl writer worker 15] build index on: db8.coll8 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db8.coll8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.074-0400 m31202| 2015-07-09T13:56:07.073-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.074-0400 m31201| 2015-07-09T13:56:07.074-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.076-0400 m31200| 2015-07-09T13:56:07.076-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.076-0400 m31200| 2015-07-09T13:56:07.076-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db8.coll8' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.080-0400 m31202| 2015-07-09T13:56:07.079-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.096-0400 m31100| 2015-07-09T13:56:07.096-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.096-0400 m31100| 2015-07-09T13:56:07.096-0400 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.097-0400 m31100| 2015-07-09T13:56:07.096-0400 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.097-0400 m31100| 2015-07-09T13:56:07.097-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.100-0400 m31200| 2015-07-09T13:56:07.099-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db8.coll8' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.100-0400 m31200| 2015-07-09T13:56:07.100-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.100-0400-559eb5b7d5a107a5b9c0da98", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464567100), what: "moveChunk.to", ns: "db8.coll8", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 39, step 2 of 5: 11, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.154-0400 m31100| 2015-07-09T13:56:07.153-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.154-0400 m31100| 2015-07-09T13:56:07.153-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb5b6ca4787b9985d1bdf through { _id: MinKey } -> { _id: 0 } for collection 'db8.coll8'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.155-0400 m31100| 2015-07-09T13:56:07.154-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.154-0400-559eb5b7792e00bb6727490c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464567154), what: "moveChunk.commit", ns: "db8.coll8", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.192-0400 m31200| 2015-07-09T13:56:07.191-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:07.187-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.192-0400 m31100| 2015-07-09T13:56:07.192-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.193-0400 m31100| 2015-07-09T13:56:07.192-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.193-0400 m31100| 2015-07-09T13:56:07.192-0400 I SHARDING [conn15] Deleter starting delete for: db8.coll8 from { _id: 0 } -> { _id: MaxKey }, with opId: 9133
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.193-0400 m31100| 2015-07-09T13:56:07.192-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db8.coll8 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.193-0400 m31100| 2015-07-09T13:56:07.192-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.194-0400 m31100| 2015-07-09T13:56:07.193-0400 I SHARDING [conn15] distributed lock 'db8.coll8/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.194-0400 m31100| 2015-07-09T13:56:07.193-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.193-0400-559eb5b7792e00bb6727490d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464567193), what: "moveChunk.from", ns: "db8.coll8", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 96, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.249-0400 m31100| 2015-07-09T13:56:07.248-0400 I COMMAND [conn15] command db8.coll8 command: moveChunk { moveChunk: "db8.coll8", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5b6ca4787b9985d1bdf') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 286ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.250-0400 m30999| 2015-07-09T13:56:07.250-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 42 version: 2|1||559eb5b6ca4787b9985d1bdf based on: 1|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.251-0400 m31100| 2015-07-09T13:56:07.251-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db8.coll8", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5b6ca4787b9985d1bdf') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.255-0400 m31100| 2015-07-09T13:56:07.254-0400 I SHARDING [conn15] distributed lock 'db8.coll8/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5b7792e00bb6727490e
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.255-0400 m31100| 2015-07-09T13:56:07.255-0400 I SHARDING [conn15] remotely refreshing metadata for db8.coll8 based on current shard version 2|0||559eb5b6ca4787b9985d1bdf, current metadata version is 2|0||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.256-0400 m31100| 2015-07-09T13:56:07.256-0400 I SHARDING [conn15] updating metadata for db8.coll8 from shard version 2|0||559eb5b6ca4787b9985d1bdf to shard version 2|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.256-0400 m31100| 2015-07-09T13:56:07.256-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb5b6ca4787b9985d1bdf, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.257-0400 m31100| 2015-07-09T13:56:07.256-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.258-0400 m31100| 2015-07-09T13:56:07.257-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.257-0400-559eb5b7792e00bb6727490f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464567257), what: "split", ns: "db8.coll8", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5b6ca4787b9985d1bdf') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5b6ca4787b9985d1bdf') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.311-0400 m31100| 2015-07-09T13:56:07.311-0400 I SHARDING [conn15] distributed lock 'db8.coll8/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.313-0400 m30999| 2015-07-09T13:56:07.313-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 43 version: 2|3||559eb5b6ca4787b9985d1bdf based on: 2|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.314-0400 m31200| 2015-07-09T13:56:07.313-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db8.coll8", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5b6ca4787b9985d1bdf') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.317-0400 m31200| 2015-07-09T13:56:07.317-0400 I SHARDING [conn48] distributed lock 'db8.coll8/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5b7d5a107a5b9c0da99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.318-0400 m31200| 2015-07-09T13:56:07.317-0400 I SHARDING [conn48] remotely refreshing metadata for db8.coll8 based on current shard version 0|0||559eb5b6ca4787b9985d1bdf, current metadata version is 1|1||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.319-0400 m31200| 2015-07-09T13:56:07.318-0400 I SHARDING [conn48] updating metadata for db8.coll8 from shard version 0|0||559eb5b6ca4787b9985d1bdf to shard version 2|0||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.319-0400 m31200| 2015-07-09T13:56:07.319-0400 I SHARDING [conn48] collection version was loaded at version 2|3||559eb5b6ca4787b9985d1bdf, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.319-0400 m31200| 2015-07-09T13:56:07.319-0400 I SHARDING [conn48] splitChunk accepted at version 2|0||559eb5b6ca4787b9985d1bdf
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.320-0400 m31200| 2015-07-09T13:56:07.320-0400 I SHARDING [conn48] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.320-0400-559eb5b7d5a107a5b9c0da9a", server:
"bs-osx108-8", clientAddr: "127.0.0.1:62774", time: new Date(1436464567320), what: "split", ns: "db8.coll8", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5b6ca4787b9985d1bdf') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5b6ca4787b9985d1bdf') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.375-0400 m31200| 2015-07-09T13:56:07.374-0400 I SHARDING [conn48] distributed lock 'db8.coll8/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.377-0400 m30999| 2015-07-09T13:56:07.377-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 44 version: 2|5||559eb5b6ca4787b9985d1bdf based on: 2|3||559eb5b6ca4787b9985d1bdf [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.381-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.434-0400 m30999| 2015-07-09T13:56:07.433-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62810 #55 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.444-0400 m30999| 2015-07-09T13:56:07.443-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62811 #56 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.446-0400 m30998| 2015-07-09T13:56:07.444-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62812 #54 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.447-0400 m30998| 2015-07-09T13:56:07.446-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62813 #55 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.447-0400 m30998| 2015-07-09T13:56:07.447-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62814 #56 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.451-0400 setting random seed: 2699345140717 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.451-0400 setting random seed: 1116301408037 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.451-0400 setting random seed: 7893801168538 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.453-0400 setting random seed: 1677773622795 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.453-0400 setting random seed: 1419807183556 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.461-0400 m30998| 2015-07-09T13:56:07.460-0400 I SHARDING [conn56] ChunkManager: time to load chunks for db8.coll8: 0ms sequenceNumber: 10 version: 2|5||559eb5b6ca4787b9985d1bdf based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.487-0400 m30999| 2015-07-09T13:56:07.481-0400 I NETWORK [conn55] end connection 127.0.0.1:62810 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.487-0400 m30998| 2015-07-09T13:56:07.487-0400 I NETWORK [conn54] end connection 127.0.0.1:62812 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.488-0400 m30999| 2015-07-09T13:56:07.488-0400 I NETWORK [conn56] end connection 127.0.0.1:62811 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.491-0400 m30998| 2015-07-09T13:56:07.491-0400 I NETWORK [conn55] end connection 127.0.0.1:62813 (2 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:07.504-0400 m30998| 2015-07-09T13:56:07.504-0400 I NETWORK [conn56] end connection 127.0.0.1:62814 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.522-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.522-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.522-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.522-0400 jstests/concurrency/fsm_workloads/update_inc.js: Workload completed in 142 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.522-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.522-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.522-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.523-0400 m30999| 2015-07-09T13:56:07.522-0400 I COMMAND [conn1] DROP: db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.523-0400 m30999| 2015-07-09T13:56:07.522-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.522-0400-559eb5b7ca4787b9985d1be1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464567522), what: "dropCollection.start", ns: "db8.coll8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.579-0400 m30999| 2015-07-09T13:56:07.579-0400 I SHARDING [conn1] distributed lock 'db8.coll8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b7ca4787b9985d1be2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.580-0400 m31100| 2015-07-09T13:56:07.580-0400 I COMMAND [conn15] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.583-0400 m31200| 2015-07-09T13:56:07.583-0400 I COMMAND [conn48] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.584-0400 m31101| 2015-07-09T13:56:07.584-0400 I COMMAND [repl writer worker 6] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.584-0400 m31102| 2015-07-09T13:56:07.584-0400 I COMMAND [repl writer worker 5] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.587-0400 m31201| 2015-07-09T13:56:07.587-0400 I COMMAND [repl writer worker 13] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.587-0400 m31202| 2015-07-09T13:56:07.587-0400 I COMMAND [repl writer worker 5] CMD: drop db8.coll8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.640-0400 m31100| 2015-07-09T13:56:07.639-0400 I SHARDING [conn15] remotely refreshing metadata for db8.coll8 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5b6ca4787b9985d1bdf, current metadata version is 2|3||559eb5b6ca4787b9985d1bdf [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.641-0400 m31100| 2015-07-09T13:56:07.641-0400 W SHARDING [conn15] no chunks found when reloading db8.coll8, previous version was 0|0||559eb5b6ca4787b9985d1bdf, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.641-0400 m31100| 2015-07-09T13:56:07.641-0400 I SHARDING [conn15] dropping metadata for db8.coll8 at shard version 2|3||559eb5b6ca4787b9985d1bdf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.643-0400 m31200| 2015-07-09T13:56:07.642-0400 I SHARDING [conn48] remotely refreshing metadata for db8.coll8 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5b6ca4787b9985d1bdf, current metadata version is 2|5||559eb5b6ca4787b9985d1bdf [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.644-0400 m31200| 
2015-07-09T13:56:07.643-0400 W SHARDING [conn48] no chunks found when reloading db8.coll8, previous version was 0|0||559eb5b6ca4787b9985d1bdf, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.644-0400 m31200| 2015-07-09T13:56:07.644-0400 I SHARDING [conn48] dropping metadata for db8.coll8 at shard version 2|5||559eb5b6ca4787b9985d1bdf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.645-0400 m30999| 2015-07-09T13:56:07.644-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.644-0400-559eb5b7ca4787b9985d1be3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464567644), what: "dropCollection", ns: "db8.coll8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.699-0400 m30999| 2015-07-09T13:56:07.698-0400 I SHARDING [conn1] distributed lock 'db8.coll8/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.754-0400 m30999| 2015-07-09T13:56:07.754-0400 I COMMAND [conn1] DROP DATABASE: db8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.754-0400 m30999| 2015-07-09T13:56:07.754-0400 I SHARDING [conn1] DBConfig::dropDatabase: db8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.755-0400 m30999| 2015-07-09T13:56:07.754-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.754-0400-559eb5b7ca4787b9985d1be4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464567754), what: "dropDatabase.start", ns: "db8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.861-0400 m30999| 2015-07-09T13:56:07.860-0400 I SHARDING [conn1] DBConfig::dropDatabase: db8 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.861-0400 m31100| 2015-07-09T13:56:07.860-0400 I COMMAND [conn28] dropDatabase db8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.861-0400 m31100| 2015-07-09T13:56:07.860-0400 I COMMAND [conn28] dropDatabase db8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.862-0400 m30999| 2015-07-09T13:56:07.861-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:07.861-0400-559eb5b7ca4787b9985d1be5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464567861), what: "dropDatabase", ns: "db8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.862-0400 m31101| 2015-07-09T13:56:07.862-0400 I COMMAND [repl writer worker 12] dropDatabase db8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.862-0400 m31101| 2015-07-09T13:56:07.862-0400 I COMMAND [repl writer worker 12] dropDatabase db8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.862-0400 m31102| 2015-07-09T13:56:07.862-0400 I COMMAND [repl writer worker 8] dropDatabase db8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.862-0400 m31102| 2015-07-09T13:56:07.862-0400 I COMMAND [repl writer worker 8] dropDatabase db8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.950-0400 m31100| 2015-07-09T13:56:07.949-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.953-0400 m31102| 2015-07-09T13:56:07.953-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.954-0400 m31101| 2015-07-09T13:56:07.953-0400 I COMMAND [repl writer worker 2] CMD: drop 
test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.993-0400 m31200| 2015-07-09T13:56:07.992-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.995-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.995-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.996-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.996-0400 jstests/concurrency/fsm_workloads/yield.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.996-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.996-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.996-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.996-0400 m31201| 2015-07-09T13:56:07.996-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:07.997-0400 m31202| 2015-07-09T13:56:07.996-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.001-0400 m30999| 2015-07-09T13:56:08.001-0400 I SHARDING [conn1] distributed lock 'db9/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b7ca4787b9985d1be6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.005-0400 m30999| 2015-07-09T13:56:08.005-0400 I SHARDING [conn1] Placing [db9] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.005-0400 m30999| 2015-07-09T13:56:08.005-0400 I SHARDING [conn1] Enabling sharding for database [db9] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.060-0400 m30999| 2015-07-09T13:56:08.060-0400 I SHARDING [conn1] distributed lock 'db9/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.087-0400 m31100| 2015-07-09T13:56:08.086-0400 I INDEX [conn70] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.088-0400 m31100| 2015-07-09T13:56:08.086-0400 I INDEX [conn70] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.100-0400 m31100| 2015-07-09T13:56:08.100-0400 I INDEX [conn70] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.103-0400 m30999| 2015-07-09T13:56:08.103-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db9.coll9", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.108-0400 m30999| 2015-07-09T13:56:08.107-0400 I SHARDING [conn1] distributed lock 'db9.coll9/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5b8ca4787b9985d1be7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.109-0400 m31101| 2015-07-09T13:56:08.109-0400 I INDEX [repl writer worker 10] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.110-0400 m31101| 2015-07-09T13:56:08.109-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.110-0400 m30999| 2015-07-09T13:56:08.109-0400 I SHARDING [conn1] enable sharding on: db9.coll9 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.111-0400 m30999| 2015-07-09T13:56:08.109-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.109-0400-559eb5b8ca4787b9985d1be8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464568109), what: "shardCollection.start", ns: "db9.coll9", details: { shardKey: { _id: "hashed" }, collection: "db9.coll9", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.115-0400 m31102| 2015-07-09T13:56:08.114-0400 I INDEX [repl writer worker 2] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.115-0400 m31102| 2015-07-09T13:56:08.115-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.118-0400 m31101| 2015-07-09T13:56:08.118-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.121-0400 m31102| 2015-07-09T13:56:08.120-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.162-0400 m30999| 2015-07-09T13:56:08.162-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db9.coll9 using new epoch 559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.269-0400 m30999| 2015-07-09T13:56:08.269-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 45 version: 1|1||559eb5b8ca4787b9985d1be9 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.324-0400 m30999| 2015-07-09T13:56:08.323-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 46 version: 1|1||559eb5b8ca4787b9985d1be9 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.325-0400 m31100| 2015-07-09T13:56:08.325-0400 I SHARDING [conn47] remotely refreshing metadata for db9.coll9 with requested shard version 1|1||559eb5b8ca4787b9985d1be9, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.326-0400 m31100| 2015-07-09T13:56:08.326-0400 I SHARDING [conn47] collection db9.coll9 was previously unsharded, new metadata loaded with shard version 1|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.327-0400 m31100| 2015-07-09T13:56:08.326-0400 I SHARDING [conn47] collection version was loaded at version 1|1||559eb5b8ca4787b9985d1be9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.327-0400 m30999| 2015-07-09T13:56:08.326-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.326-0400-559eb5b8ca4787b9985d1bea", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464568326), what: "shardCollection", ns: "db9.coll9", details: { version: "1|1||559eb5b8ca4787b9985d1be9" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.381-0400 m30999| 2015-07-09T13:56:08.381-0400 I SHARDING [conn1] distributed lock 'db9.coll9/bs-osx108-8:30999:1436464534:16807' unlocked. 
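The block above is the per-workload setup the harness repeats for each FSM workload: mongos takes the distributed lock for the fresh database (here db9), places it on test-rs0, enables sharding, and shards coll9 on { _id: "hashed" }, which builds the _id_hashed index on the primary shard (and, via the oplog, on its secondaries) and creates two initial chunks split at { _id: 0 }. An approximate shell equivalent, using the standard sh.* helpers in place of the raw shardcollection command visible in the log (a sketch only, not the harness's actual code; all names are taken from the log):

    // Rough equivalent of the db9.coll9 setup logged above, run against a mongos.
    sh.enableSharding("db9");                           // "Enabling sharding for database [db9]"
    sh.shardCollection("db9.coll9", { _id: "hashed" }); // builds _id_hashed and creates the two
                                                        // initial chunks split at { _id: 0 }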
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.382-0400 m30999| 2015-07-09T13:56:08.382-0400 I SHARDING [conn1] moving chunk ns: db9.coll9 moving ( ns: db9.coll9, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.383-0400 m31100| 2015-07-09T13:56:08.382-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.384-0400 m31100| 2015-07-09T13:56:08.383-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5b8ca4787b9985d1be9') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.387-0400 m31100| 2015-07-09T13:56:08.387-0400 I SHARDING [conn15] distributed lock 'db9.coll9/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5b8792e00bb67274911 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.388-0400 m31100| 2015-07-09T13:56:08.387-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.387-0400-559eb5b8792e00bb67274912", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464568387), what: "moveChunk.start", ns: "db9.coll9", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.440-0400 m31100| 2015-07-09T13:56:08.440-0400 I SHARDING [conn15] remotely refreshing metadata for db9.coll9 based on current shard version 1|1||559eb5b8ca4787b9985d1be9, current metadata version is 1|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.442-0400 m31100| 2015-07-09T13:56:08.442-0400 I SHARDING [conn15] metadata of collection db9.coll9 already up to date (shard version : 1|1||559eb5b8ca4787b9985d1be9, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.442-0400 m31100| 2015-07-09T13:56:08.442-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.443-0400 m31100| 2015-07-09T13:56:08.442-0400 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.443-0400 m31200| 2015-07-09T13:56:08.443-0400 I SHARDING [conn16] remotely refreshing metadata for db9.coll9, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.445-0400 m31200| 2015-07-09T13:56:08.444-0400 I SHARDING [conn16] collection db9.coll9 was previously unsharded, new metadata loaded with shard version 0|0||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.445-0400 m31200| 2015-07-09T13:56:08.444-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb5b8ca4787b9985d1be9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.445-0400 m31200| 2015-07-09T13:56:08.445-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db9.coll9 from 
test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.447-0400 m31100| 2015-07-09T13:56:08.446-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.450-0400 m31100| 2015-07-09T13:56:08.449-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.455-0400 m31100| 2015-07-09T13:56:08.455-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.460-0400 m31200| 2015-07-09T13:56:08.460-0400 I INDEX [migrateThread] build index on: db9.coll9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.460-0400 m31200| 2015-07-09T13:56:08.460-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.466-0400 m31100| 2015-07-09T13:56:08.464-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.469-0400 m31200| 2015-07-09T13:56:08.468-0400 I INDEX [migrateThread] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.469-0400 m31200| 2015-07-09T13:56:08.468-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.483-0400 m31100| 2015-07-09T13:56:08.482-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.490-0400 m31200| 2015-07-09T13:56:08.489-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.490-0400 m31200| 2015-07-09T13:56:08.490-0400 I SHARDING [migrateThread] Deleter starting delete for: db9.coll9 from { _id: 0 } -> { _id: MaxKey }, with opId: 6052 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.490-0400 m31200| 2015-07-09T13:56:08.490-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db9.coll9 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.498-0400 m31201| 2015-07-09T13:56:08.497-0400 I INDEX [repl writer worker 8] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.498-0400 m31201| 2015-07-09T13:56:08.498-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.500-0400 m31202| 2015-07-09T13:56:08.500-0400 I INDEX [repl writer worker 11] build index on: db9.coll9 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db9.coll9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.501-0400 m31202| 2015-07-09T13:56:08.500-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.505-0400 m31201| 2015-07-09T13:56:08.505-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.508-0400 m31200| 2015-07-09T13:56:08.507-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.508-0400 m31200| 2015-07-09T13:56:08.507-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db9.coll9' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.508-0400 m31202| 2015-07-09T13:56:08.508-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.517-0400 m31100| 2015-07-09T13:56:08.516-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.517-0400 m31100| 2015-07-09T13:56:08.516-0400 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.517-0400 m31100| 2015-07-09T13:56:08.517-0400 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.518-0400 m31100| 2015-07-09T13:56:08.517-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.520-0400 m31200| 2015-07-09T13:56:08.520-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db9.coll9' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.521-0400 m31200| 2015-07-09T13:56:08.520-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.520-0400-559eb5b8d5a107a5b9c0da9b", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464568520), what: "moveChunk.to", ns: "db9.coll9", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 44, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.574-0400 m31100| 2015-07-09T13:56:08.573-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.574-0400 m31100| 2015-07-09T13:56:08.573-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb5b8ca4787b9985d1be9 through { _id: MinKey } -> { _id: 0 } for collection 'db9.coll9' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.575-0400 m31100| 2015-07-09T13:56:08.574-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.574-0400-559eb5b8792e00bb67274913", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464568574), what: "moveChunk.commit", ns: "db9.coll9", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.628-0400 m31100| 2015-07-09T13:56:08.627-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.628-0400 m31100| 2015-07-09T13:56:08.627-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.628-0400 m31100| 2015-07-09T13:56:08.627-0400 I SHARDING [conn15] Deleter starting delete for: db9.coll9 from { _id: 0 } -> { _id: MaxKey }, with opId: 9291 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.628-0400 m31100| 
2015-07-09T13:56:08.628-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db9.coll9 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.628-0400 m31100| 2015-07-09T13:56:08.628-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.629-0400 m31100| 2015-07-09T13:56:08.629-0400 I SHARDING [conn15] distributed lock 'db9.coll9/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.629-0400 m31100| 2015-07-09T13:56:08.629-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.629-0400-559eb5b8792e00bb67274914", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464568629), what: "moveChunk.from", ns: "db9.coll9", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 111, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.682-0400 m31100| 2015-07-09T13:56:08.681-0400 I COMMAND [conn15] command db9.coll9 command: moveChunk { moveChunk: "db9.coll9", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5b8ca4787b9985d1be9') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 298ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.684-0400 m30999| 2015-07-09T13:56:08.683-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 47 version: 2|1||559eb5b8ca4787b9985d1be9 based on: 1|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.685-0400 m31100| 2015-07-09T13:56:08.684-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db9.coll9", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5b8ca4787b9985d1be9') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.689-0400 m31100| 2015-07-09T13:56:08.688-0400 I SHARDING [conn15] distributed lock 'db9.coll9/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5b8792e00bb67274915 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.689-0400 m31100| 2015-07-09T13:56:08.689-0400 I SHARDING [conn15] remotely refreshing metadata for db9.coll9 based on current shard version 2|0||559eb5b8ca4787b9985d1be9, current metadata version is 2|0||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.690-0400 m31100| 2015-07-09T13:56:08.690-0400 I SHARDING [conn15] updating metadata for db9.coll9 from shard version 2|0||559eb5b8ca4787b9985d1be9 to shard version 2|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.691-0400 m31100| 2015-07-09T13:56:08.690-0400 I SHARDING [conn15] collection version was loaded at version 
2|1||559eb5b8ca4787b9985d1be9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.691-0400 m31100| 2015-07-09T13:56:08.690-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.692-0400 m31100| 2015-07-09T13:56:08.691-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.691-0400-559eb5b8792e00bb67274916", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464568691), what: "split", ns: "db9.coll9", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5b8ca4787b9985d1be9') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5b8ca4787b9985d1be9') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.744-0400 m31100| 2015-07-09T13:56:08.744-0400 I SHARDING [conn15] distributed lock 'db9.coll9/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.745-0400 m30999| 2015-07-09T13:56:08.745-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 48 version: 2|3||559eb5b8ca4787b9985d1be9 based on: 2|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.746-0400 m31200| 2015-07-09T13:56:08.745-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db9.coll9", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5b8ca4787b9985d1be9') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.747-0400 m31200| 2015-07-09T13:56:08.747-0400 I SHARDING [conn48] distributed lock 'db9.coll9/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5b8d5a107a5b9c0da9c [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.748-0400 m31200| 2015-07-09T13:56:08.747-0400 I SHARDING [conn48] remotely refreshing metadata for db9.coll9 based on current shard version 0|0||559eb5b8ca4787b9985d1be9, current metadata version is 1|1||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.749-0400 m31200| 2015-07-09T13:56:08.748-0400 I SHARDING [conn48] updating metadata for db9.coll9 from shard version 0|0||559eb5b8ca4787b9985d1be9 to shard version 2|0||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.749-0400 m31200| 2015-07-09T13:56:08.748-0400 I SHARDING [conn48] collection version was loaded at version 2|3||559eb5b8ca4787b9985d1be9, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.749-0400 m31200| 2015-07-09T13:56:08.748-0400 I SHARDING [conn48] splitChunk accepted at version 2|0||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.750-0400 m31200| 2015-07-09T13:56:08.750-0400 I SHARDING [conn48] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:08.750-0400-559eb5b8d5a107a5b9c0da9d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62774", time: new Date(1436464568750), what: "split", ns: "db9.coll9", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5b8ca4787b9985d1be9') }, right: { min: { _id: 
4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5b8ca4787b9985d1be9') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.803-0400 m31200| 2015-07-09T13:56:08.802-0400 I SHARDING [conn48] distributed lock 'db9.coll9/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.804-0400 m30999| 2015-07-09T13:56:08.803-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 49 version: 2|5||559eb5b8ca4787b9985d1be9 based on: 2|3||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.874-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.926-0400 m30999| 2015-07-09T13:56:08.925-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62815 #57 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.934-0400 m30999| 2015-07-09T13:56:08.934-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62816 #58 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.939-0400 m30998| 2015-07-09T13:56:08.939-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62817 #57 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.943-0400 m30998| 2015-07-09T13:56:08.943-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62818 #58 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.943-0400 m30999| 2015-07-09T13:56:08.943-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62819 #59 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.948-0400 setting random seed: 8341291458345 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.948-0400 setting random seed: 2085605701431 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.949-0400 setting random seed: 2775040282867 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.949-0400 setting random seed: 5253148702904 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.950-0400 setting random seed: 102055878378 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.953-0400 m30998| 2015-07-09T13:56:08.951-0400 I SHARDING [conn57] ChunkManager: time to load chunks for db9.coll9: 0ms sequenceNumber: 11 version: 2|5||559eb5b8ca4787b9985d1be9 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.992-0400 m31200| 2015-07-09T13:56:08.991-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62820 #54 (50 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:08.995-0400 m31200| 2015-07-09T13:56:08.994-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62821 #55 (51 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:09.223-0400 m31200| 2015-07-09T13:56:09.223-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62822 #56 (52 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:09.266-0400 m31100| 2015-07-09T13:56:09.266-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62823 #74 (68 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:10.762-0400 m31200| 2015-07-09T13:56:10.761-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62824 #57 (53 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.124-0400 m30999| 
2015-07-09T13:56:13.124-0400 I NETWORK [conn59] end connection 127.0.0.1:62819 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.174-0400 m30999| 2015-07-09T13:56:13.174-0400 I NETWORK [conn57] end connection 127.0.0.1:62815 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.353-0400 m30998| 2015-07-09T13:56:13.353-0400 I NETWORK [conn58] end connection 127.0.0.1:62818 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.488-0400 m30998| 2015-07-09T13:56:13.487-0400 I NETWORK [conn57] end connection 127.0.0.1:62817 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.969-0400 m30999| 2015-07-09T13:56:13.968-0400 I NETWORK [conn58] end connection 127.0.0.1:62816 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.980-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.980-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.980-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.980-0400 jstests/concurrency/fsm_workloads/yield.js: Workload completed in 5095 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.980-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.981-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.981-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.981-0400 m30999| 2015-07-09T13:56:13.980-0400 I COMMAND [conn1] DROP: db9.coll9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:13.981-0400 m30999| 2015-07-09T13:56:13.980-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:13.980-0400-559eb5bdca4787b9985d1beb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464573980), what: "dropCollection.start", ns: "db9.coll9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.037-0400 m30999| 2015-07-09T13:56:14.037-0400 I SHARDING [conn1] distributed lock 'db9.coll9/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5beca4787b9985d1bec [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.038-0400 m31100| 2015-07-09T13:56:14.038-0400 I COMMAND [conn15] CMD: drop db9.coll9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.041-0400 m31200| 2015-07-09T13:56:14.041-0400 I COMMAND [conn48] CMD: drop db9.coll9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.042-0400 m31102| 2015-07-09T13:56:14.042-0400 I COMMAND [repl writer worker 10] CMD: drop db9.coll9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.043-0400 m31101| 2015-07-09T13:56:14.042-0400 I COMMAND [repl writer worker 12] CMD: drop db9.coll9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.045-0400 m31201| 2015-07-09T13:56:14.045-0400 I COMMAND [repl writer worker 9] CMD: drop db9.coll9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.046-0400 m31202| 2015-07-09T13:56:14.045-0400 I COMMAND [repl writer worker 8] CMD: drop db9.coll9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.096-0400 m31100| 2015-07-09T13:56:14.096-0400 I SHARDING [conn15] remotely refreshing metadata for db9.coll9 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5b8ca4787b9985d1be9, current metadata version is 2|3||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.098-0400 m31100| 2015-07-09T13:56:14.097-0400 W SHARDING [conn15] no chunks found when reloading db9.coll9, previous version was 
0|0||559eb5b8ca4787b9985d1be9, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.098-0400 m31100| 2015-07-09T13:56:14.098-0400 I SHARDING [conn15] dropping metadata for db9.coll9 at shard version 2|3||559eb5b8ca4787b9985d1be9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.099-0400 m31200| 2015-07-09T13:56:14.099-0400 I SHARDING [conn48] remotely refreshing metadata for db9.coll9 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5b8ca4787b9985d1be9, current metadata version is 2|5||559eb5b8ca4787b9985d1be9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.100-0400 m31200| 2015-07-09T13:56:14.100-0400 W SHARDING [conn48] no chunks found when reloading db9.coll9, previous version was 0|0||559eb5b8ca4787b9985d1be9, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.100-0400 m31200| 2015-07-09T13:56:14.100-0400 I SHARDING [conn48] dropping metadata for db9.coll9 at shard version 2|5||559eb5b8ca4787b9985d1be9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.101-0400 m30999| 2015-07-09T13:56:14.101-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:14.101-0400-559eb5beca4787b9985d1bed", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464574101), what: "dropCollection", ns: "db9.coll9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.157-0400 m30999| 2015-07-09T13:56:14.156-0400 I SHARDING [conn1] distributed lock 'db9.coll9/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.213-0400 m30999| 2015-07-09T13:56:14.212-0400 I COMMAND [conn1] DROP DATABASE: db9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.213-0400 m30999| 2015-07-09T13:56:14.212-0400 I SHARDING [conn1] DBConfig::dropDatabase: db9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.213-0400 m30999| 2015-07-09T13:56:14.212-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:14.212-0400-559eb5beca4787b9985d1bee", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464574212), what: "dropDatabase.start", ns: "db9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.319-0400 m30999| 2015-07-09T13:56:14.319-0400 I SHARDING [conn1] DBConfig::dropDatabase: db9 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.320-0400 m31100| 2015-07-09T13:56:14.320-0400 I COMMAND [conn28] dropDatabase db9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.320-0400 m31100| 2015-07-09T13:56:14.320-0400 I COMMAND [conn28] dropDatabase db9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.321-0400 m30999| 2015-07-09T13:56:14.320-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:14.320-0400-559eb5beca4787b9985d1bef", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464574320), what: "dropDatabase", ns: "db9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.321-0400 m31101| 2015-07-09T13:56:14.321-0400 I COMMAND [repl writer worker 13] dropDatabase db9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.321-0400 m31102| 2015-07-09T13:56:14.321-0400 I COMMAND [repl writer worker 3] dropDatabase db9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.321-0400 m31101| 2015-07-09T13:56:14.321-0400 I COMMAND 
[repl writer worker 13] dropDatabase db9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.321-0400 m31102| 2015-07-09T13:56:14.321-0400 I COMMAND [repl writer worker 3] dropDatabase db9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.412-0400 m31100| 2015-07-09T13:56:14.412-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.416-0400 m31101| 2015-07-09T13:56:14.415-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.416-0400 m31102| 2015-07-09T13:56:14.416-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.452-0400 m31200| 2015-07-09T13:56:14.452-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.454-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.455-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.455-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.455-0400 jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.455-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.455-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.455-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.455-0400 m31202| 2015-07-09T13:56:14.455-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.456-0400 m31201| 2015-07-09T13:56:14.455-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.462-0400 m30999| 2015-07-09T13:56:14.462-0400 I SHARDING [conn1] distributed lock 'db10/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5beca4787b9985d1bf0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.467-0400 m30999| 2015-07-09T13:56:14.466-0400 I SHARDING [conn1] Placing [db10] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.467-0400 m30999| 2015-07-09T13:56:14.466-0400 I SHARDING [conn1] Enabling sharding for database [db10] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.521-0400 m30999| 2015-07-09T13:56:14.520-0400 I SHARDING [conn1] distributed lock 'db10/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.538-0400 m31100| 2015-07-09T13:56:14.537-0400 I INDEX [conn70] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.538-0400 m31100| 2015-07-09T13:56:14.538-0400 I INDEX [conn70] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.544-0400 m31100| 2015-07-09T13:56:14.543-0400 I INDEX [conn70] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.545-0400 m30999| 2015-07-09T13:56:14.544-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db10.coll10", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.548-0400 m30999| 2015-07-09T13:56:14.548-0400 I SHARDING [conn1] distributed lock 'db10.coll10/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5beca4787b9985d1bf1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.549-0400 m30999| 2015-07-09T13:56:14.549-0400 I SHARDING [conn1] enable sharding on: db10.coll10 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.550-0400 m30999| 2015-07-09T13:56:14.549-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:14.549-0400-559eb5beca4787b9985d1bf2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464574549), what: "shardCollection.start", ns: "db10.coll10", details: { shardKey: { _id: "hashed" }, collection: "db10.coll10", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.554-0400 m31102| 2015-07-09T13:56:14.553-0400 I INDEX [repl writer worker 4] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.555-0400 m31102| 2015-07-09T13:56:14.553-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.559-0400 m31101| 2015-07-09T13:56:14.559-0400 I INDEX [repl writer worker 4] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.560-0400 m31101| 2015-07-09T13:56:14.559-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.562-0400 m31102| 2015-07-09T13:56:14.562-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.563-0400 m31101| 2015-07-09T13:56:14.563-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.603-0400 m30999| 2015-07-09T13:56:14.602-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db10.coll10 using new epoch 559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.710-0400 m30999| 2015-07-09T13:56:14.709-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 50 version: 1|1||559eb5beca4787b9985d1bf3 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.765-0400 m30999| 2015-07-09T13:56:14.765-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 51 version: 1|1||559eb5beca4787b9985d1bf3 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.864-0400 m31100| 2015-07-09T13:56:14.767-0400 I SHARDING [conn47] remotely refreshing metadata for db10.coll10 with requested shard version 1|1||559eb5beca4787b9985d1bf3, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.864-0400 m31100| 2015-07-09T13:56:14.768-0400 I SHARDING [conn47] collection db10.coll10 was previously unsharded, new metadata loaded with shard version 1|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.864-0400 m31100| 2015-07-09T13:56:14.768-0400 I SHARDING [conn47] collection version was loaded at version 1|1||559eb5beca4787b9985d1bf3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.865-0400 m30999| 2015-07-09T13:56:14.769-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:14.769-0400-559eb5beca4787b9985d1bf4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464574769), what: "shardCollection", ns: "db10.coll10", details: { version: "1|1||559eb5beca4787b9985d1bf3" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.865-0400 m30999| 2015-07-09T13:56:14.822-0400 I SHARDING [conn1] distributed lock 'db10.coll10/bs-osx108-8:30999:1436464534:16807' unlocked. 
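db10.coll10 now goes through the same move-and-split sequence already logged in full for db8.coll8 and db9.coll9: mongos moves the upper chunk { _id: 0 } -> { _id: MaxKey } from test-rs0 to test-rs1 (the moveChunk.start, moveChunk.to, moveChunk.commit and moveChunk.from metadata events above time the donor's six steps and the recipient's five), then splits each shard's remaining chunk at its midpoint, -4611686018427387902 and 4611686018427387902 (roughly ±2^62, the quarter points of the signed 64-bit hashed-key range), ending at version 2|5 with two chunks per shard. An approximate shell equivalent of that sequence, shown for db9.coll9 whose split points appear in the log above (a sketch using the standard helpers; the harness issues the raw moveChunk/splitChunk commands directly):

    // Move the upper half to the second shard, then split each half at its midpoint.
    // Namespace, shard name and split keys are copied from the log entries above.
    sh.moveChunk("db9.coll9", { _id: 0 }, "test-rs1");
    sh.splitAt("db9.coll9", { _id: NumberLong("-4611686018427387902") }); // lower-half midpoint
    sh.splitAt("db9.coll9", { _id: NumberLong("4611686018427387902") });  // upper-half midpoint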
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.865-0400 m30999| 2015-07-09T13:56:14.823-0400 I SHARDING [conn1] moving chunk ns: db10.coll10 moving ( ns: db10.coll10, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.865-0400 m31100| 2015-07-09T13:56:14.823-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.866-0400 m31100| 2015-07-09T13:56:14.824-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5beca4787b9985d1bf3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.866-0400 m31100| 2015-07-09T13:56:14.827-0400 I SHARDING [conn15] distributed lock 'db10.coll10/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5be792e00bb67274918 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.866-0400 m31100| 2015-07-09T13:56:14.827-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:14.827-0400-559eb5be792e00bb67274919", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464574827), what: "moveChunk.start", ns: "db10.coll10", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.882-0400 m31100| 2015-07-09T13:56:14.881-0400 I SHARDING [conn15] remotely refreshing metadata for db10.coll10 based on current shard version 1|1||559eb5beca4787b9985d1bf3, current metadata version is 1|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.883-0400 m31100| 2015-07-09T13:56:14.883-0400 I SHARDING [conn15] metadata of collection db10.coll10 already up to date (shard version : 1|1||559eb5beca4787b9985d1bf3, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.883-0400 m31100| 2015-07-09T13:56:14.883-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.883-0400 m31100| 2015-07-09T13:56:14.883-0400 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.884-0400 m31200| 2015-07-09T13:56:14.883-0400 I SHARDING [conn16] remotely refreshing metadata for db10.coll10, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.885-0400 m31200| 2015-07-09T13:56:14.885-0400 I SHARDING [conn16] collection db10.coll10 was previously unsharded, new metadata loaded with shard version 0|0||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.886-0400 m31200| 2015-07-09T13:56:14.885-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb5beca4787b9985d1bf3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.886-0400 m31200| 2015-07-09T13:56:14.886-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db10.coll10 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.888-0400 m31100| 2015-07-09T13:56:14.888-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.891-0400 m31100| 2015-07-09T13:56:14.891-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.897-0400 m31100| 2015-07-09T13:56:14.896-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.899-0400 m31200| 2015-07-09T13:56:14.898-0400 I INDEX [migrateThread] build index on: db10.coll10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.899-0400 m31200| 2015-07-09T13:56:14.899-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.906-0400 m31100| 2015-07-09T13:56:14.906-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.911-0400 m31200| 2015-07-09T13:56:14.910-0400 I INDEX [migrateThread] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.911-0400 m31200| 2015-07-09T13:56:14.910-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.920-0400 m31200| 2015-07-09T13:56:14.920-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.921-0400 m31200| 2015-07-09T13:56:14.920-0400 I SHARDING [migrateThread] Deleter starting delete for: db10.coll10 from { _id: 0 } -> { _id: MaxKey }, with opId: 16245 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.921-0400 m31200| 2015-07-09T13:56:14.921-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db10.coll10 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.923-0400 m31100| 2015-07-09T13:56:14.923-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.928-0400 m31201| 2015-07-09T13:56:14.927-0400 I INDEX [repl writer worker 14] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.929-0400 m31201| 2015-07-09T13:56:14.928-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.930-0400 m31202| 2015-07-09T13:56:14.930-0400 I INDEX [repl writer worker 7] build index on: db10.coll10 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.931-0400 m31202| 2015-07-09T13:56:14.930-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.934-0400 m31201| 2015-07-09T13:56:14.933-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.935-0400 m31200| 2015-07-09T13:56:14.935-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.936-0400 m31200| 2015-07-09T13:56:14.935-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db10.coll10' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.936-0400 m31202| 2015-07-09T13:56:14.936-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.957-0400 m31100| 2015-07-09T13:56:14.957-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.958-0400 m31100| 2015-07-09T13:56:14.957-0400 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.958-0400 m31100| 2015-07-09T13:56:14.958-0400 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.958-0400 m31100| 2015-07-09T13:56:14.958-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.959-0400 m31200| 2015-07-09T13:56:14.959-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db10.coll10' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:14.960-0400 m31200| 2015-07-09T13:56:14.959-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:14.959-0400-559eb5bed5a107a5b9c0da9e", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464574959), what: "moveChunk.to", ns: "db10.coll10", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 34, step 2 of 5: 13, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.013-0400 m31100| 2015-07-09T13:56:15.013-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.013-0400 m31100| 2015-07-09T13:56:15.013-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb5beca4787b9985d1bf3 through { _id: MinKey } -> { _id: 0 } for collection 'db10.coll10' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.015-0400 m31100| 2015-07-09T13:56:15.014-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:15.014-0400-559eb5bf792e00bb6727491a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464575014), what: "moveChunk.commit", ns: "db10.coll10", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.067-0400 m31100| 2015-07-09T13:56:15.067-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.067-0400 m31100| 2015-07-09T13:56:15.067-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.068-0400 m31100| 2015-07-09T13:56:15.067-0400 I SHARDING [conn15] Deleter starting delete for: db10.coll10 from { _id: 0 } -> { _id: MaxKey }, with opId: 19044 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:15.068-0400 m31100| 2015-07-09T13:56:15.067-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db10.coll10 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.068-0400 m31100| 2015-07-09T13:56:15.067-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.068-0400 m31100| 2015-07-09T13:56:15.068-0400 I SHARDING [conn15] distributed lock 'db10.coll10/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.069-0400 m31100| 2015-07-09T13:56:15.068-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:15.068-0400-559eb5bf792e00bb6727491b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464575068), what: "moveChunk.from", ns: "db10.coll10", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 109, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.122-0400 m31100| 2015-07-09T13:56:15.121-0400 I COMMAND [conn15] command db10.coll10 command: moveChunk { moveChunk: "db10.coll10", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5beca4787b9985d1bf3') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 298ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.124-0400 m30999| 2015-07-09T13:56:15.124-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 52 version: 2|1||559eb5beca4787b9985d1bf3 based on: 1|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.126-0400 m31100| 2015-07-09T13:56:15.125-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db10.coll10", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5beca4787b9985d1bf3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.129-0400 m31100| 2015-07-09T13:56:15.129-0400 I SHARDING [conn15] distributed lock 'db10.coll10/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5bf792e00bb6727491c [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.130-0400 m31100| 2015-07-09T13:56:15.129-0400 I SHARDING [conn15] remotely refreshing metadata for db10.coll10 based on current shard version 2|0||559eb5beca4787b9985d1bf3, current metadata version is 2|0||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.131-0400 m31100| 2015-07-09T13:56:15.131-0400 I SHARDING [conn15] updating metadata for db10.coll10 from shard version 2|0||559eb5beca4787b9985d1bf3 to shard version 2|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.131-0400 m31100| 2015-07-09T13:56:15.131-0400 I 
SHARDING [conn15] collection version was loaded at version 2|1||559eb5beca4787b9985d1bf3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.131-0400 m31100| 2015-07-09T13:56:15.131-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.133-0400 m31100| 2015-07-09T13:56:15.132-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:15.132-0400-559eb5bf792e00bb6727491d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464575132), what: "split", ns: "db10.coll10", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5beca4787b9985d1bf3') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5beca4787b9985d1bf3') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.186-0400 m31100| 2015-07-09T13:56:15.186-0400 I SHARDING [conn15] distributed lock 'db10.coll10/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.189-0400 m30999| 2015-07-09T13:56:15.188-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 53 version: 2|3||559eb5beca4787b9985d1bf3 based on: 2|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.190-0400 m31200| 2015-07-09T13:56:15.189-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db10.coll10", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5beca4787b9985d1bf3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.193-0400 m31200| 2015-07-09T13:56:15.193-0400 I SHARDING [conn48] distributed lock 'db10.coll10/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5bfd5a107a5b9c0da9f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.193-0400 m31200| 2015-07-09T13:56:15.193-0400 I SHARDING [conn48] remotely refreshing metadata for db10.coll10 based on current shard version 0|0||559eb5beca4787b9985d1bf3, current metadata version is 1|1||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.195-0400 m31200| 2015-07-09T13:56:15.194-0400 I SHARDING [conn48] updating metadata for db10.coll10 from shard version 0|0||559eb5beca4787b9985d1bf3 to shard version 2|0||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.195-0400 m31200| 2015-07-09T13:56:15.195-0400 I SHARDING [conn48] collection version was loaded at version 2|3||559eb5beca4787b9985d1bf3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.195-0400 m31200| 2015-07-09T13:56:15.195-0400 I SHARDING [conn48] splitChunk accepted at version 2|0||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.196-0400 m31200| 2015-07-09T13:56:15.196-0400 I SHARDING [conn48] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:15.196-0400-559eb5bfd5a107a5b9c0daa0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62774", time: new Date(1436464575196), what: "split", ns: "db10.coll10", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb5beca4787b9985d1bf3') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5beca4787b9985d1bf3') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.250-0400 m31200| 2015-07-09T13:56:15.250-0400 I SHARDING [conn48] distributed lock 'db10.coll10/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.253-0400 m30999| 2015-07-09T13:56:15.252-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db10.coll10: 1ms sequenceNumber: 54 version: 2|5||559eb5beca4787b9985d1bf3 based on: 2|3||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.260-0400 m31200| 2015-07-09T13:56:15.260-0400 I INDEX [conn40] build index on: db10.coll10 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.261-0400 m31200| 2015-07-09T13:56:15.260-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.261-0400 m31100| 2015-07-09T13:56:15.261-0400 I INDEX [conn47] build index on: db10.coll10 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.261-0400 m31100| 2015-07-09T13:56:15.261-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.267-0400 m31200| 2015-07-09T13:56:15.267-0400 I INDEX [conn40] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.268-0400 m31100| 2015-07-09T13:56:15.268-0400 I INDEX [conn47] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.270-0400 m31100| 2015-07-09T13:56:15.270-0400 I COMMAND [conn15] CMD: dropIndexes db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.270-0400 m31200| 2015-07-09T13:56:15.270-0400 I COMMAND [conn48] CMD: dropIndexes db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.274-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.285-0400 m31202| 2015-07-09T13:56:15.285-0400 I INDEX [repl writer worker 1] build index on: db10.coll10 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.286-0400 m31202| 2015-07-09T13:56:15.285-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.288-0400 m31201| 2015-07-09T13:56:15.287-0400 I INDEX [repl writer worker 12] build index on: db10.coll10 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.288-0400 m31201| 2015-07-09T13:56:15.287-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.307-0400 m31102| 2015-07-09T13:56:15.306-0400 I INDEX [repl writer worker 15] build index on: db10.coll10 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.307-0400 m31102| 2015-07-09T13:56:15.306-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.307-0400 m31101| 2015-07-09T13:56:15.307-0400 I INDEX [repl writer worker 6] build index on: db10.coll10 properties: { v: 1, key: { indexed_insert_large: 1.0 }, name: "indexed_insert_large_1", ns: "db10.coll10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.307-0400 m31101| 2015-07-09T13:56:15.307-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.381-0400 m31201| 2015-07-09T13:56:15.379-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.389-0400 m31202| 2015-07-09T13:56:15.382-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.390-0400 m31102| 2015-07-09T13:56:15.383-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.403-0400 m31101| 2015-07-09T13:56:15.403-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.412-0400 m31102| 2015-07-09T13:56:15.408-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.416-0400 m31202| 2015-07-09T13:56:15.413-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.433-0400 m30998| 2015-07-09T13:56:15.425-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62827 #59 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.434-0400 m31101| 2015-07-09T13:56:15.433-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.453-0400 m30999| 2015-07-09T13:56:15.453-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62828 #60 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.463-0400 m30999| 2015-07-09T13:56:15.463-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62829 #61 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.473-0400 m30998| 2015-07-09T13:56:15.467-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62830 #60 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.474-0400 m30999| 2015-07-09T13:56:15.474-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62832 #62 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.478-0400 m30998| 2015-07-09T13:56:15.477-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62831 #61 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.484-0400 m30999| 2015-07-09T13:56:15.484-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62833 #63 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.523-0400 m30998| 2015-07-09T13:56:15.521-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62834 #62 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.524-0400 m30999| 2015-07-09T13:56:15.524-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62835 #64 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.525-0400 m30998| 2015-07-09T13:56:15.525-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62836 #63 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.526-0400 m30999| 2015-07-09T13:56:15.525-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62837 #65 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.527-0400 m30998| 2015-07-09T13:56:15.527-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62838 #64 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.527-0400 m30999| 2015-07-09T13:56:15.527-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62842 #66 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.527-0400 m30998| 2015-07-09T13:56:15.527-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62839 #65 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.535-0400 m30998| 2015-07-09T13:56:15.535-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62840 #66 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.535-0400 m30998| 
2015-07-09T13:56:15.535-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62841 #67 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.539-0400 m30998| 2015-07-09T13:56:15.539-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62843 #68 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.542-0400 m30999| 2015-07-09T13:56:15.542-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62844 #67 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.543-0400 m30999| 2015-07-09T13:56:15.543-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62845 #68 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.543-0400 m30999| 2015-07-09T13:56:15.543-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62846 #69 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.558-0400 setting random seed: 7359218802303 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.558-0400 setting random seed: 793559779413 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.558-0400 setting random seed: 8868326502852 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.558-0400 setting random seed: 9963602838106 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.558-0400 setting random seed: 1758931907825 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.558-0400 setting random seed: 124995154328 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.561-0400 setting random seed: 4409754751250 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.563-0400 setting random seed: 8272978030145 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.563-0400 setting random seed: 6390467900782 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.563-0400 setting random seed: 2364595639519 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.563-0400 setting random seed: 7601735261268 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.563-0400 setting random seed: 8577986387535 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.565-0400 m30998| 2015-07-09T13:56:15.565-0400 I SHARDING [conn64] ChunkManager: time to load chunks for db10.coll10: 0ms sequenceNumber: 12 version: 2|5||559eb5beca4787b9985d1bf3 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.570-0400 setting random seed: 217282450757 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.571-0400 setting random seed: 2368588582612 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.571-0400 setting random seed: 4748138771392 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.571-0400 setting random seed: 3941817395389 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.572-0400 setting random seed: 1968367951922 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.585-0400 m31201| 2015-07-09T13:56:15.584-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.590-0400 setting random seed: 1866151588037 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.591-0400 setting random seed: 2360183359123 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.611-0400 setting random seed: 9670290825888 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.670-0400 m31200| 2015-07-09T13:56:15.669-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62847 #58 (54 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:15.684-0400 m31200| 2015-07-09T13:56:15.684-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62848 #59 (55 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.061-0400 m31200| 2015-07-09T13:56:16.061-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62849 #60 (56 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.081-0400 m31200| 2015-07-09T13:56:16.080-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62850 #61 (57 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.108-0400 m30999| 2015-07-09T13:56:16.107-0400 I NETWORK [conn61] end connection 127.0.0.1:62829 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.126-0400 m30999| 2015-07-09T13:56:16.126-0400 I NETWORK [conn62] end connection 127.0.0.1:62832 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.132-0400 m30999| 2015-07-09T13:56:16.131-0400 I NETWORK [conn60] end connection 127.0.0.1:62828 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.139-0400 m30998| 2015-07-09T13:56:16.139-0400 I NETWORK [conn61] end connection 127.0.0.1:62831 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.144-0400 m30998| 2015-07-09T13:56:16.144-0400 I NETWORK [conn68] end connection 127.0.0.1:62843 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.151-0400 m30998| 2015-07-09T13:56:16.149-0400 I NETWORK [conn62] end connection 127.0.0.1:62834 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.151-0400 m30998| 2015-07-09T13:56:16.151-0400 I NETWORK [conn60] end connection 127.0.0.1:62830 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.165-0400 m30998| 2015-07-09T13:56:16.164-0400 I NETWORK [conn65] end connection 127.0.0.1:62839 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.173-0400 m30999| 2015-07-09T13:56:16.173-0400 I NETWORK [conn68] end connection 127.0.0.1:62845 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.182-0400 m30999| 2015-07-09T13:56:16.181-0400 I NETWORK [conn66] end connection 127.0.0.1:62842 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.208-0400 m30998| 2015-07-09T13:56:16.208-0400 I NETWORK [conn63] end connection 127.0.0.1:62836 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.211-0400 m30998| 2015-07-09T13:56:16.211-0400 I NETWORK [conn67] end connection 127.0.0.1:62841 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.212-0400 m30999| 2015-07-09T13:56:16.212-0400 I NETWORK [conn65] end connection 127.0.0.1:62837 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.224-0400 m30999| 2015-07-09T13:56:16.224-0400 I NETWORK [conn64] end connection 127.0.0.1:62835 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.230-0400 m30998| 2015-07-09T13:56:16.229-0400 I NETWORK [conn64] end connection 127.0.0.1:62838 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.301-0400 m30999| 2015-07-09T13:56:16.301-0400 I NETWORK [conn63] end connection 127.0.0.1:62833 (3 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:16.304-0400 m30998| 2015-07-09T13:56:16.304-0400 I NETWORK [conn66] end connection 127.0.0.1:62840 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.311-0400 m30999| 2015-07-09T13:56:16.308-0400 I NETWORK [conn67] end connection 127.0.0.1:62844 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.311-0400 m30998| 2015-07-09T13:56:16.308-0400 I NETWORK [conn59] end connection 127.0.0.1:62827 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.331-0400 m30999| 2015-07-09T13:56:16.330-0400 I NETWORK [conn69] end connection 127.0.0.1:62846 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.397-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.397-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.398-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.398-0400 jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js: Workload completed in 1124 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.398-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.398-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.398-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.398-0400 m30999| 2015-07-09T13:56:16.398-0400 I COMMAND [conn1] DROP: db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.398-0400 m30999| 2015-07-09T13:56:16.398-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:16.398-0400-559eb5c0ca4787b9985d1bf5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464576398), what: "dropCollection.start", ns: "db10.coll10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.440-0400 m30999| 2015-07-09T13:56:16.439-0400 I SHARDING [conn1] distributed lock 'db10.coll10/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c0ca4787b9985d1bf6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.441-0400 m31100| 2015-07-09T13:56:16.441-0400 I COMMAND [conn15] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.444-0400 m31200| 2015-07-09T13:56:16.443-0400 I COMMAND [conn48] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.445-0400 m31101| 2015-07-09T13:56:16.445-0400 I COMMAND [repl writer worker 11] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.445-0400 m31102| 2015-07-09T13:56:16.445-0400 I COMMAND [repl writer worker 9] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.448-0400 m31201| 2015-07-09T13:56:16.447-0400 I COMMAND [repl writer worker 13] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.449-0400 m31202| 2015-07-09T13:56:16.448-0400 I COMMAND [repl writer worker 13] CMD: drop db10.coll10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.501-0400 m31100| 2015-07-09T13:56:16.501-0400 I SHARDING [conn15] remotely refreshing metadata for db10.coll10 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5beca4787b9985d1bf3, current metadata version is 2|3||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.503-0400 m31100| 2015-07-09T13:56:16.502-0400 W SHARDING [conn15] no chunks found when reloading db10.coll10, previous version was 0|0||559eb5beca4787b9985d1bf3, this is a drop [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:16.503-0400 m31100| 2015-07-09T13:56:16.503-0400 I SHARDING [conn15] dropping metadata for db10.coll10 at shard version 2|3||559eb5beca4787b9985d1bf3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.504-0400 m31200| 2015-07-09T13:56:16.504-0400 I SHARDING [conn48] remotely refreshing metadata for db10.coll10 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5beca4787b9985d1bf3, current metadata version is 2|5||559eb5beca4787b9985d1bf3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.505-0400 m31200| 2015-07-09T13:56:16.505-0400 W SHARDING [conn48] no chunks found when reloading db10.coll10, previous version was 0|0||559eb5beca4787b9985d1bf3, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.506-0400 m31200| 2015-07-09T13:56:16.505-0400 I SHARDING [conn48] dropping metadata for db10.coll10 at shard version 2|5||559eb5beca4787b9985d1bf3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.506-0400 m30999| 2015-07-09T13:56:16.506-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:16.506-0400-559eb5c0ca4787b9985d1bf7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464576506), what: "dropCollection", ns: "db10.coll10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.561-0400 m30999| 2015-07-09T13:56:16.561-0400 I SHARDING [conn1] distributed lock 'db10.coll10/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.617-0400 m30999| 2015-07-09T13:56:16.617-0400 I COMMAND [conn1] DROP DATABASE: db10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.618-0400 m30999| 2015-07-09T13:56:16.617-0400 I SHARDING [conn1] DBConfig::dropDatabase: db10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.618-0400 m30999| 2015-07-09T13:56:16.617-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:16.617-0400-559eb5c0ca4787b9985d1bf8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464576617), what: "dropDatabase.start", ns: "db10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.726-0400 m30999| 2015-07-09T13:56:16.725-0400 I SHARDING [conn1] DBConfig::dropDatabase: db10 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.726-0400 m31100| 2015-07-09T13:56:16.725-0400 I COMMAND [conn28] dropDatabase db10 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.726-0400 m31100| 2015-07-09T13:56:16.726-0400 I COMMAND [conn28] dropDatabase db10 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.727-0400 m30999| 2015-07-09T13:56:16.726-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:16.726-0400-559eb5c0ca4787b9985d1bf9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464576726), what: "dropDatabase", ns: "db10", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.727-0400 m31101| 2015-07-09T13:56:16.727-0400 I COMMAND [repl writer worker 5] dropDatabase db10 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.727-0400 m31101| 2015-07-09T13:56:16.727-0400 I COMMAND [repl writer worker 5] dropDatabase db10 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.728-0400 m31102| 2015-07-09T13:56:16.727-0400 I COMMAND [repl writer worker 0] dropDatabase db10 starting 
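
Stepping back to the db10.coll10 migration completed above: it follows the usual donor/recipient protocol. The donor (m31100, conn15) acquires the distributed lock, the recipient (m31200, migrateThread) builds the indexes and clones the range, replication on the recipient catches up, and only then does the donor enter the critical section and set the version to 2|0. Because the request carried waitForDelete: true, the donor also deletes the moved range inline before releasing the lock. A hedged shell equivalent of the logged request, with bounds copied from the log (_waitForDelete is assumed to be the shell-facing spelling of the logged waitForDelete field):

    // Hedged sketch (mongo shell JS): re-issue the chunk migration seen in
    // the log through mongos. "bounds" is used rather than "find" because the
    // shard key is hashed; the values mirror the logged moveChunk request.
    var admin = db.getSiblingDB("admin");
    assert.commandWorked(admin.runCommand({
        moveChunk: "db10.coll10",
        bounds: [ { _id: 0 }, { _id: MaxKey } ],
        to: "test-rs1",
        _waitForDelete: true
    }));
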
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.728-0400 m31102| 2015-07-09T13:56:16.727-0400 I COMMAND [repl writer worker 0] dropDatabase db10 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.817-0400 m31100| 2015-07-09T13:56:16.817-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.821-0400 m31101| 2015-07-09T13:56:16.821-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.821-0400 m31102| 2015-07-09T13:56:16.821-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.848-0400 m31200| 2015-07-09T13:56:16.848-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.851-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.851-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.852-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.852-0400 jstests/concurrency/fsm_workloads/explain_count.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.852-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.852-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.852-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.852-0400 m31201| 2015-07-09T13:56:16.852-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.852-0400 m31202| 2015-07-09T13:56:16.852-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.857-0400 m30999| 2015-07-09T13:56:16.857-0400 I SHARDING [conn1] distributed lock 'db11/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c0ca4787b9985d1bfa [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.863-0400 m30999| 2015-07-09T13:56:16.863-0400 I SHARDING [conn1] Placing [db11] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.863-0400 m30999| 2015-07-09T13:56:16.863-0400 I SHARDING [conn1] Enabling sharding for database [db11] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.916-0400 m30999| 2015-07-09T13:56:16.915-0400 I SHARDING [conn1] distributed lock 'db11/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.939-0400 m31200| 2015-07-09T13:56:16.939-0400 I INDEX [conn53] build index on: db11.coll11 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db11.coll11" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.939-0400 m31200| 2015-07-09T13:56:16.939-0400 I INDEX [conn53] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.949-0400 m31200| 2015-07-09T13:56:16.949-0400 I INDEX [conn53] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.952-0400 m30999| 2015-07-09T13:56:16.951-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db11.coll11", key: { j: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.955-0400 m30999| 2015-07-09T13:56:16.955-0400 I SHARDING [conn1] distributed lock 'db11.coll11/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c0ca4787b9985d1bfb [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.956-0400 m30999| 2015-07-09T13:56:16.955-0400 I SHARDING [conn1] enable sharding on: db11.coll11 with shard key: { j: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.956-0400 m30999| 2015-07-09T13:56:16.955-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:16.955-0400-559eb5c0ca4787b9985d1bfc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464576955), what: "shardCollection.start", ns: "db11.coll11", details: { shardKey: { j: 1.0 }, collection: "db11.coll11", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.961-0400 m31202| 2015-07-09T13:56:16.961-0400 I INDEX [repl writer worker 2] build index on: db11.coll11 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db11.coll11" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.962-0400 m31202| 2015-07-09T13:56:16.961-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.963-0400 m31201| 2015-07-09T13:56:16.963-0400 I INDEX [repl writer worker 14] build index on: db11.coll11 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db11.coll11" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.964-0400 m31201| 2015-07-09T13:56:16.963-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.967-0400 m31202| 2015-07-09T13:56:16.967-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:16.971-0400 m31201| 2015-07-09T13:56:16.970-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.010-0400 m30999| 2015-07-09T13:56:17.009-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db11.coll11 using new epoch 559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.064-0400 m30999| 2015-07-09T13:56:17.064-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 55 version: 1|0||559eb5c1ca4787b9985d1bfd based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.120-0400 m30999| 2015-07-09T13:56:17.119-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 56 version: 1|0||559eb5c1ca4787b9985d1bfd based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.122-0400 m31200| 2015-07-09T13:56:17.122-0400 I SHARDING [conn40] remotely refreshing metadata for db11.coll11 with requested shard version 1|0||559eb5c1ca4787b9985d1bfd, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.124-0400 m31200| 2015-07-09T13:56:17.124-0400 I SHARDING [conn40] collection db11.coll11 was previously unsharded, new metadata loaded with shard version 1|0||559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.124-0400 m31200| 2015-07-09T13:56:17.124-0400 I SHARDING [conn40] collection version was loaded at version 1|0||559eb5c1ca4787b9985d1bfd, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.124-0400 m30999| 2015-07-09T13:56:17.124-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:17.124-0400-559eb5c1ca4787b9985d1bfe", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464577124), what: "shardCollection", ns: "db11.coll11", details: { version: "1|0||559eb5c1ca4787b9985d1bfd" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.178-0400 m30999| 2015-07-09T13:56:17.178-0400 I SHARDING [conn1] distributed lock 'db11.coll11/bs-osx108-8:30999:1436464534:16807' unlocked. 
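
Unlike db10, db11.coll11 is sharded on a plain ascending key, { j: 1.0 }, and gets a single initial chunk (numChunks: 1). Note the ordering in the log: the j_1 index is built on the primary shard test-rs1 (and replicated to m31201/m31202) before the shardcollection command is logged, since a range shard key needs a supporting index. A minimal sketch using the names from the log:

    // Hedged sketch (mongo shell JS): range-shard db11.coll11 on { j: 1 }.
    // The explicit createIndex mirrors the j_1 index build logged just before
    // the shardcollection command above.
    var admin = db.getSiblingDB("admin");
    db.getSiblingDB("db11").coll11.createIndex({ j: 1 });
    assert.commandWorked(admin.runCommand({
        shardCollection: "db11.coll11",
        key: { j: 1 }
    }));
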
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.179-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.252-0400 m30998| 2015-07-09T13:56:17.248-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62851 #69 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.259-0400 m30998| 2015-07-09T13:56:17.259-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62852 #70 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.263-0400 m30999| 2015-07-09T13:56:17.260-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62853 #70 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.275-0400 m30998| 2015-07-09T13:56:17.274-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62854 #71 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.284-0400 m30998| 2015-07-09T13:56:17.284-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62855 #72 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.286-0400 m30999| 2015-07-09T13:56:17.285-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62856 #71 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.286-0400 m30999| 2015-07-09T13:56:17.286-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62857 #72 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.292-0400 m30999| 2015-07-09T13:56:17.292-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62858 #73 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.296-0400 m30998| 2015-07-09T13:56:17.295-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62859 #73 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.300-0400 m30999| 2015-07-09T13:56:17.299-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62860 #74 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.307-0400 setting random seed: 2340723392553 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.307-0400 setting random seed: 1701512914150 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.307-0400 setting random seed: 4594058468937 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.307-0400 setting random seed: 5568614355288 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.308-0400 setting random seed: 5643159304745 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.308-0400 setting random seed: 4578479020856 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.313-0400 setting random seed: 3459604247473 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.313-0400 setting random seed: 4325319398194 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.313-0400 m30998| 2015-07-09T13:56:17.313-0400 I SHARDING [conn70] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 13 version: 1|0||559eb5c1ca4787b9985d1bfd based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.314-0400 setting random seed: 7327561448328 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.314-0400 setting random seed: 538076967932 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.324-0400 m31200| 2015-07-09T13:56:17.324-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62861 #62 (58 connections now open) 
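
The burst of accepted connections and "setting random seed" lines is the FSM harness fanning out its worker threads (10 requested here, 20 for the previous workload), each connecting through one of the two routers (m30998/m30999) and seeding its own PRNG so a failing iteration can be replayed deterministically. The seed lines match what the shell's Random.setRandomSeed prints; the sketch below is inferred per-thread setup only, since the harness's actual code lives under jstests/concurrency/fsm_libs/ and is not reproduced in this log:

    // Hedged sketch (mongo shell JS) of per-worker initialization, inferred
    // from the log output; not the FSM harness's actual code.
    var seed = 2340723392553;                 // one of the seeds printed above
    Random.setRandomSeed(seed);               // prints "setting random seed: <seed>"
    var conn = new Mongo("localhost:30998");  // hypothetical router address
    var testDB = conn.getDB("db11");
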
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.328-0400 m31200| 2015-07-09T13:56:17.328-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62862 #63 (59 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.329-0400 m31200| 2015-07-09T13:56:17.329-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62863 #64 (60 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.339-0400 m31200| 2015-07-09T13:56:17.339-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62864 #65 (61 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.373-0400 m31200| 2015-07-09T13:56:17.372-0400 I SHARDING [conn64] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.374-0400 m31200| 2015-07-09T13:56:17.373-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.376-0400 m31200| 2015-07-09T13:56:17.375-0400 I SHARDING [conn64] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daa2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.377-0400 m31200| 2015-07-09T13:56:17.375-0400 I SHARDING [conn64] remotely refreshing metadata for db11.coll11 based on current shard version 1|0||559eb5c1ca4787b9985d1bfd, current metadata version is 1|0||559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.379-0400 m31200| 2015-07-09T13:56:17.376-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.379-0400 m31200| 2015-07-09T13:56:17.376-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.379-0400 m31200| 2015-07-09T13:56:17.377-0400 I SHARDING [conn64] metadata of collection db11.coll11 already up to date (shard version : 1|0||559eb5c1ca4787b9985d1bfd, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.379-0400 m31200| 2015-07-09T13:56:17.377-0400 I SHARDING [conn64] splitChunk accepted at version 1|0||559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.380-0400 m31200| 2015-07-09T13:56:17.377-0400 I SHARDING [conn63] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.380-0400 m31200| 2015-07-09T13:56:17.378-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.380-0400 m31200| 2015-07-09T13:56:17.378-0400 I SHARDING [conn18] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.381-0400 m31200| 2015-07-09T13:56:17.378-0400 W SHARDING [conn65] could not acquire collection lock for db11.coll11 to split chunk [{ : 
MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.382-0400 m30998| 2015-07-09T13:56:17.378-0400 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.382-0400 m31200| 2015-07-09T13:56:17.378-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.383-0400 m31200| 2015-07-09T13:56:17.379-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.383-0400 m31200| 2015-07-09T13:56:17.379-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:17.379-0400-559eb5c1d5a107a5b9c0daa3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464577379), what: "multi-split", ns: "db11.coll11", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 1, of: 3, chunk: { min: { j: MinKey }, max: { j: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb5c1ca4787b9985d1bfd') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.383-0400 m31200| 2015-07-09T13:56:17.380-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.384-0400 m31200| 2015-07-09T13:56:17.381-0400 W SHARDING [conn63] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.384-0400 m30999| 2015-07-09T13:56:17.381-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.385-0400 m29000| 2015-07-09T13:56:17.381-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62865 #41 (41 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.385-0400 m31200| 2015-07-09T13:56:17.383-0400 W SHARDING [conn18] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.385-0400 m30999| 2015-07-09T13:56:17.383-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.386-0400 m29000| 2015-07-09T13:56:17.384-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62866 #42 (42 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.388-0400 m31200| 2015-07-09T13:56:17.387-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.388-0400 m30998| 2015-07-09T13:56:17.387-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.396-0400 m31200| 2015-07-09T13:56:17.395-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.396-0400 m31200| 2015-07-09T13:56:17.396-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.397-0400 m31200| 2015-07-09T13:56:17.396-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.398-0400 m31200| 2015-07-09T13:56:17.396-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.398-0400 m31200| 2015-07-09T13:56:17.397-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.398-0400 m31200| 2015-07-09T13:56:17.397-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.399-0400 m30998| 2015-07-09T13:56:17.397-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.400-0400 m31200| 2015-07-09T13:56:17.398-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.400-0400 m31200| 2015-07-09T13:56:17.398-0400 W SHARDING [conn65] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.400-0400 m30998| 2015-07-09T13:56:17.398-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.401-0400 m31200| 2015-07-09T13:56:17.399-0400 W SHARDING [conn47] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.401-0400 m30998| 2015-07-09T13:56:17.400-0400 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.404-0400 m31200| 2015-07-09T13:56:17.403-0400 I SHARDING [conn18] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.405-0400 m31200| 2015-07-09T13:56:17.404-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.407-0400 m31200| 2015-07-09T13:56:17.406-0400 W SHARDING [conn18] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.408-0400 m30999| 2015-07-09T13:56:17.406-0400 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.413-0400 m31200| 2015-07-09T13:56:17.413-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.414-0400 m31200| 2015-07-09T13:56:17.413-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.415-0400 m31200| 2015-07-09T13:56:17.414-0400 W SHARDING [conn47] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.415-0400 m30998| 2015-07-09T13:56:17.415-0400 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.418-0400 m31200| 2015-07-09T13:56:17.418-0400 I SHARDING [conn18] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.419-0400 m31200| 2015-07-09T13:56:17.418-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.421-0400 m31200| 2015-07-09T13:56:17.420-0400 W SHARDING [conn18] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.421-0400 m30999| 2015-07-09T13:56:17.420-0400 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.428-0400 m31200| 2015-07-09T13:56:17.428-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.428-0400 m31200| 2015-07-09T13:56:17.428-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.429-0400 m31200| 2015-07-09T13:56:17.428-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.430-0400 m31200| 2015-07-09T13:56:17.429-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.430-0400 m31200| 2015-07-09T13:56:17.429-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.431-0400 m31200| 2015-07-09T13:56:17.430-0400 W SHARDING [conn65] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.431-0400 m31200| 2015-07-09T13:56:17.430-0400 W SHARDING [conn47] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.432-0400 m30998| 2015-07-09T13:56:17.430-0400 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.432-0400 m30998| 2015-07-09T13:56:17.430-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.432-0400 m31200| 2015-07-09T13:56:17.431-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.433-0400 m31200| 2015-07-09T13:56:17.431-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:17.431-0400-559eb5c1d5a107a5b9c0daa4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464577431), what: "multi-split", ns: "db11.coll11", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 2, of: 3, chunk: { min: { j: 0.0 }, max: { j: 4.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb5c1ca4787b9985d1bfd') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.433-0400 m31200| 2015-07-09T13:56:17.432-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.434-0400 m30998| 2015-07-09T13:56:17.432-0400 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.441-0400 m31200| 2015-07-09T13:56:17.441-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.442-0400 m31200| 2015-07-09T13:56:17.441-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.442-0400 m31200| 2015-07-09T13:56:17.441-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.442-0400 m31200| 2015-07-09T13:56:17.442-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.445-0400 m31200| 2015-07-09T13:56:17.443-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.447-0400 m30998| 2015-07-09T13:56:17.443-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.448-0400 m30998| 2015-07-09T13:56:17.443-0400 W SHARDING [conn70] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.448-0400 m31200| 2015-07-09T13:56:17.443-0400 W SHARDING [conn47] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.448-0400 m31200| 2015-07-09T13:56:17.443-0400 I SHARDING [conn18] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.448-0400 m31200| 2015-07-09T13:56:17.444-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.448-0400 m31200| 2015-07-09T13:56:17.445-0400 I SHARDING [conn63] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.449-0400 m31200| 2015-07-09T13:56:17.445-0400 W SHARDING [conn18] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.449-0400 m30999| 2015-07-09T13:56:17.446-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.449-0400 m31200| 2015-07-09T13:56:17.446-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.449-0400 m31200| 2015-07-09T13:56:17.447-0400 W SHARDING [conn63] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.450-0400 m30999| 2015-07-09T13:56:17.447-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.463-0400 m31200| 2015-07-09T13:56:17.462-0400 I SHARDING [conn63] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.465-0400 m31200| 2015-07-09T13:56:17.464-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.466-0400 m31200| 2015-07-09T13:56:17.465-0400 W SHARDING [conn63] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.467-0400 m30999| 2015-07-09T13:56:17.465-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.467-0400 m31200| 2015-07-09T13:56:17.466-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.468-0400 m31200| 2015-07-09T13:56:17.468-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.470-0400 m31200| 2015-07-09T13:56:17.469-0400 W SHARDING [conn47] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.471-0400 m30999| 2015-07-09T13:56:17.469-0400 I SHARDING [conn72] ChunkManager: time to load chunks for db11.coll11: 0ms sequenceNumber: 57 version: 1|3||559eb5c1ca4787b9985d1bfd based on: 1|0||559eb5c1ca4787b9985d1bfd
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.471-0400 m30998| 2015-07-09T13:56:17.470-0400 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.476-0400 m31200| 2015-07-09T13:56:17.476-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.477-0400 m31200| 2015-07-09T13:56:17.476-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.478-0400 m31200| 2015-07-09T13:56:17.477-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.479-0400 m31200| 2015-07-09T13:56:17.477-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.482-0400 m31200| 2015-07-09T13:56:17.478-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.482-0400 m31200| 2015-07-09T13:56:17.478-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.482-0400 m31200| 2015-07-09T13:56:17.478-0400 W SHARDING [conn47] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.483-0400 m30998| 2015-07-09T13:56:17.478-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.483-0400 m31200| 2015-07-09T13:56:17.480-0400 W SHARDING [conn65] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.483-0400 m30998| 2015-07-09T13:56:17.480-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.483-0400 m31200| 2015-07-09T13:56:17.480-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.484-0400 m30998| 2015-07-09T13:56:17.480-0400 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.484-0400 m31200| 2015-07-09T13:56:17.484-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:17.484-0400-559eb5c1d5a107a5b9c0daa5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464577484), what: "multi-split", ns: "db11.coll11", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 3, of: 3, chunk: { min: { j: 4.0 }, max: { j: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb5c1ca4787b9985d1bfd') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.490-0400 m31200| 2015-07-09T13:56:17.489-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.491-0400 m31200| 2015-07-09T13:56:17.490-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.493-0400 m31200| 2015-07-09T13:56:17.492-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.494-0400 m30998| 2015-07-09T13:56:17.493-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.504-0400 m31200| 2015-07-09T13:56:17.503-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.504-0400 m31200| 2015-07-09T13:56:17.503-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.505-0400 m31200| 2015-07-09T13:56:17.504-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.505-0400 m31200| 2015-07-09T13:56:17.504-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.507-0400 m31200| 2015-07-09T13:56:17.505-0400 W SHARDING [conn65] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.507-0400 m30998| 2015-07-09T13:56:17.506-0400 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.508-0400 m31200| 2015-07-09T13:56:17.506-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.509-0400 m31200| 2015-07-09T13:56:17.506-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.509-0400 m30998| 2015-07-09T13:56:17.506-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.509-0400 m31200| 2015-07-09T13:56:17.507-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.511-0400 m31200| 2015-07-09T13:56:17.510-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.511-0400 m30998| 2015-07-09T13:56:17.510-0400 W SHARDING [conn70] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.522-0400 m31200| 2015-07-09T13:56:17.522-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.524-0400 m31200| 2015-07-09T13:56:17.523-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.525-0400 m31200| 2015-07-09T13:56:17.524-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.525-0400 m31200| 2015-07-09T13:56:17.525-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.526-0400 m30998| 2015-07-09T13:56:17.525-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.526-0400 m31200| 2015-07-09T13:56:17.526-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.528-0400 m31200| 2015-07-09T13:56:17.527-0400 W SHARDING [conn47] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.528-0400 m30998| 2015-07-09T13:56:17.527-0400 W SHARDING [conn69] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.538-0400 m31200| 2015-07-09T13:56:17.536-0400 I SHARDING [conn64] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.539-0400 m31200| 2015-07-09T13:56:17.536-0400 I COMMAND [conn64] command db11.coll11 command: splitChunk { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 1069 } } } protocol:op_command 162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.539-0400 m30999| 2015-07-09T13:56:17.536-0400 I SHARDING [conn74] autosplitted db11.coll11 shard: ns: db11.coll11, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { j: MinKey }, max: { j: MaxKey } into 3 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.540-0400 m31200| 2015-07-09T13:56:17.539-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.541-0400 m31200| 2015-07-09T13:56:17.540-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.541-0400 m31200| 2015-07-09T13:56:17.540-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.543-0400 m31200| 2015-07-09T13:56:17.542-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.543-0400 m31200| 2015-07-09T13:56:17.542-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daa6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.543-0400 m31200| 2015-07-09T13:56:17.542-0400 I SHARDING [conn47] remotely refreshing metadata for db11.coll11 based on current shard version 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.545-0400 m31200| 2015-07-09T13:56:17.544-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.546-0400 m30998| 2015-07-09T13:56:17.544-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.546-0400 m31200| 2015-07-09T13:56:17.544-0400 I SHARDING [conn47] metadata of collection db11.coll11 already up to date (shard version : 1|3||559eb5c1ca4787b9985d1bfd, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.547-0400 m31200| 2015-07-09T13:56:17.545-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.547-0400 m31200| 2015-07-09T13:56:17.545-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.547-0400 m30998| 2015-07-09T13:56:17.546-0400 W SHARDING [conn70] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.567-0400 m31200| 2015-07-09T13:56:17.564-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.567-0400 m31200| 2015-07-09T13:56:17.565-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.567-0400 m31200| 2015-07-09T13:56:17.566-0400 I SHARDING [conn62] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.568-0400 m31200| 2015-07-09T13:56:17.568-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.568-0400 m31200| 2015-07-09T13:56:17.568-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daa7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.569-0400 m31200| 2015-07-09T13:56:17.568-0400 I SHARDING [conn47] remotely refreshing metadata for db11.coll11 based on current shard version 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.570-0400 m31200| 2015-07-09T13:56:17.569-0400 W SHARDING [conn62] could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db11.coll11 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.570-0400 m30998| 2015-07-09T13:56:17.569-0400 W SHARDING [conn70] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db11.coll11 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.571-0400 m31200| 2015-07-09T13:56:17.571-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.571-0400 m31200| 2015-07-09T13:56:17.571-0400 I SHARDING [conn47] metadata of collection db11.coll11 already up to date (shard version : 1|3||559eb5c1ca4787b9985d1bfd, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.571-0400 m31200| 2015-07-09T13:56:17.571-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.572-0400 m31200| 2015-07-09T13:56:17.572-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.574-0400 m31200| 2015-07-09T13:56:17.572-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 }, { j: 36.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.575-0400 m30998| 2015-07-09T13:56:17.572-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.575-0400 m31200| 2015-07-09T13:56:17.574-0400 I SHARDING [conn65] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daa8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.576-0400 m31200| 2015-07-09T13:56:17.574-0400 I SHARDING [conn65] remotely refreshing metadata for db11.coll11 based on current shard version 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.576-0400 m31200| 2015-07-09T13:56:17.574-0400 I SHARDING [conn65] metadata of collection db11.coll11 already up to date (shard version : 1|3||559eb5c1ca4787b9985d1bfd, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.576-0400 m31200| 2015-07-09T13:56:17.574-0400 W SHARDING [conn65] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.576-0400 m31200| 2015-07-09T13:56:17.575-0400 I SHARDING [conn65] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.577-0400 m30998| 2015-07-09T13:56:17.576-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 }, { j: 36.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.589-0400 m31200| 2015-07-09T13:56:17.589-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.591-0400 m31200| 2015-07-09T13:56:17.590-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 32.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.592-0400 m31200| 2015-07-09T13:56:17.591-0400 I SHARDING [conn65] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daa9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.592-0400 m31200| 2015-07-09T13:56:17.591-0400 I SHARDING [conn65] remotely refreshing metadata for db11.coll11 based on current shard version 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.597-0400 m31200| 2015-07-09T13:56:17.597-0400 I SHARDING [conn65] metadata of collection db11.coll11 already up to date (shard version : 1|3||559eb5c1ca4787b9985d1bfd, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.597-0400 m31200| 2015-07-09T13:56:17.597-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.597-0400 m31200| 2015-07-09T13:56:17.597-0400 W SHARDING [conn65] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.598-0400 m31200| 2015-07-09T13:56:17.598-0400 I SHARDING [conn65] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.599-0400 m31200| 2015-07-09T13:56:17.598-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 36.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.600-0400 m30998| 2015-07-09T13:56:17.598-0400 W SHARDING [conn72] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 32.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.601-0400 m31200| 2015-07-09T13:56:17.600-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daaa [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.601-0400 m31200| 2015-07-09T13:56:17.600-0400 I SHARDING [conn47] remotely refreshing metadata for db11.coll11 based on current shard version 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.605-0400 m31200| 2015-07-09T13:56:17.602-0400 I SHARDING [conn47] metadata of collection db11.coll11 already up to date (shard version : 1|3||559eb5c1ca4787b9985d1bfd, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.605-0400 m31200| 2015-07-09T13:56:17.602-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.606-0400 m31200| 2015-07-09T13:56:17.603-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.606-0400 m30998| 2015-07-09T13:56:17.603-0400 W SHARDING [conn70] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 36.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.629-0400 m31200| 2015-07-09T13:56:17.627-0400 I SHARDING [conn47] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.630-0400 m31200| 2015-07-09T13:56:17.629-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 44.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.638-0400 m31200| 2015-07-09T13:56:17.638-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daab [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.638-0400 m31200| 2015-07-09T13:56:17.638-0400 I SHARDING [conn47] remotely refreshing metadata for db11.coll11 based on current shard version 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.639-0400 m31200| 2015-07-09T13:56:17.639-0400 I SHARDING [conn65] request split points lookup for chunk db11.coll11 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.640-0400 m31200| 2015-07-09T13:56:17.639-0400 I SHARDING [conn47] metadata of collection db11.coll11 already up to date (shard version : 1|3||559eb5c1ca4787b9985d1bfd, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.640-0400 m31200| 2015-07-09T13:56:17.640-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.642-0400 m31200| 2015-07-09T13:56:17.641-0400 I SHARDING [conn47] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.643-0400 m31200| 2015-07-09T13:56:17.641-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 44.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.643-0400 m30998| 2015-07-09T13:56:17.641-0400 I NETWORK [conn69] end connection 127.0.0.1:62851 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.644-0400 m30998| 2015-07-09T13:56:17.642-0400 W SHARDING [conn73] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 44.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.645-0400 m31200| 2015-07-09T13:56:17.643-0400 I SHARDING [conn65] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c1d5a107a5b9c0daac [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.646-0400 m31200| 2015-07-09T13:56:17.643-0400 I SHARDING [conn65] remotely refreshing metadata for db11.coll11 based on current shard version 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.646-0400 m31200| 2015-07-09T13:56:17.644-0400 I SHARDING [conn65] metadata of collection db11.coll11 already up to date (shard version : 1|3||559eb5c1ca4787b9985d1bfd, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.646-0400 m31200| 2015-07-09T13:56:17.645-0400 W SHARDING [conn65] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.646-0400 m31200| 2015-07-09T13:56:17.645-0400 I SHARDING [conn65] distributed lock 'db11.coll11/bs-osx108-8:31200:1436464537:809424560' unlocked. 
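The burst of code-125 warnings above is a benign split race, not a test failure: both mongos routers (m30998, m30999) ask shard test-rs1 to split the same [{ j: MinKey }, { j: MaxKey }) chunk, only the first request to win the distributed lock succeeds, and each later request refreshes metadata that is already current, can no longer find a chunk with those exact bounds, and gives up with "splitChunk cannot find chunk ... the chunk boundaries may be stale". A minimal shell sketch of the same pattern, assuming a hypothetical collection test.c sharded on { j: 1 }:

    // Ask mongos to split the chunk containing j = 10 at that value; if a
    // concurrent split already changed the chunk's bounds, this fails with
    // code 125, exactly as in the warnings above.
    var res = db.adminCommand({ split: "test.c", middle: { j: 10 } });
    if (res.ok !== 1) printjson(res); // e.g. { ok: 0, code: 125, errmsg: "..." }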
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.647-0400 m30998| 2015-07-09T13:56:17.645-0400 W SHARDING [conn71] splitChunk failed - cmd: { splitChunk: "db11.coll11", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 44.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c1ca4787b9985d1bfd') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.661-0400 m30999| 2015-07-09T13:56:17.660-0400 I NETWORK [conn70] end connection 127.0.0.1:62853 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.662-0400 m30999| 2015-07-09T13:56:17.662-0400 I NETWORK [conn71] end connection 127.0.0.1:62856 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.663-0400 m30998| 2015-07-09T13:56:17.662-0400 I NETWORK [conn70] end connection 127.0.0.1:62852 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.675-0400 m30998| 2015-07-09T13:56:17.675-0400 I NETWORK [conn73] end connection 127.0.0.1:62859 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.694-0400 m30999| 2015-07-09T13:56:17.694-0400 I NETWORK [conn72] end connection 127.0.0.1:62857 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.696-0400 m30998| 2015-07-09T13:56:17.695-0400 I NETWORK [conn72] end connection 127.0.0.1:62855 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.698-0400 m30998| 2015-07-09T13:56:17.697-0400 I NETWORK [conn71] end connection 127.0.0.1:62854 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.724-0400 m30999| 2015-07-09T13:56:17.724-0400 I NETWORK [conn73] end connection 127.0.0.1:62858 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 m30999| 2015-07-09T13:56:17.786-0400 I NETWORK [conn74] end connection 127.0.0.1:62860 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 jstests/concurrency/fsm_workloads/explain_count.js: Workload completed in 607 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.787-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.788-0400 m30999| 2015-07-09T13:56:17.787-0400 I COMMAND [conn1] DROP: db11.coll11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.788-0400 m30999| 2015-07-09T13:56:17.787-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:17.787-0400-559eb5c1ca4787b9985d1bff", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464577787), what: "dropCollection.start", ns: "db11.coll11", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.846-0400 m30999| 2015-07-09T13:56:17.846-0400 I SHARDING [conn1] distributed lock 
'db11.coll11/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c1ca4787b9985d1c00 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.847-0400 m31100| 2015-07-09T13:56:17.847-0400 I COMMAND [conn15] CMD: drop db11.coll11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.848-0400 m31200| 2015-07-09T13:56:17.848-0400 I COMMAND [conn64] CMD: drop db11.coll11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.852-0400 m31201| 2015-07-09T13:56:17.852-0400 I COMMAND [repl writer worker 3] CMD: drop db11.coll11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.852-0400 m31202| 2015-07-09T13:56:17.852-0400 I COMMAND [repl writer worker 1] CMD: drop db11.coll11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.905-0400 m31200| 2015-07-09T13:56:17.904-0400 I SHARDING [conn64] remotely refreshing metadata for db11.coll11 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eb5c1ca4787b9985d1bfd, current metadata version is 1|3||559eb5c1ca4787b9985d1bfd [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.906-0400 m31200| 2015-07-09T13:56:17.906-0400 W SHARDING [conn64] no chunks found when reloading db11.coll11, previous version was 0|0||559eb5c1ca4787b9985d1bfd, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.906-0400 m31200| 2015-07-09T13:56:17.906-0400 I SHARDING [conn64] dropping metadata for db11.coll11 at shard version 1|3||559eb5c1ca4787b9985d1bfd, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.908-0400 m30999| 2015-07-09T13:56:17.907-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:17.907-0400-559eb5c1ca4787b9985d1c01", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464577907), what: "dropCollection", ns: "db11.coll11", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:17.962-0400 m30999| 2015-07-09T13:56:17.961-0400 I SHARDING [conn1] distributed lock 'db11.coll11/bs-osx108-8:30999:1436464534:16807' unlocked. 
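The drop above walks the sharded dropCollection protocol: mongos writes dropCollection.start to the config changelog, holds the collection's distributed lock, issues the drop on each shard primary (the secondaries apply it through their repl writer workers), and every shard's metadata reload then finds zero chunks, logs the expected "no chunks found ..., this is a drop" warning, and discards its cached shard version before the lock is released. Sketched from the shell with the log's names, the teardown effectively runs:

    // Workload teardown as driven through mongos.
    db.getSiblingDB("db11").coll11.drop();   // sharded collection drop
    db.getSiblingDB("db11").dropDatabase();  // database drop, logged next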
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.018-0400 m30999| 2015-07-09T13:56:18.018-0400 I COMMAND [conn1] DROP DATABASE: db11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.019-0400 m30999| 2015-07-09T13:56:18.018-0400 I SHARDING [conn1] DBConfig::dropDatabase: db11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.019-0400 m30999| 2015-07-09T13:56:18.018-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.018-0400-559eb5c2ca4787b9985d1c02", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464578018), what: "dropDatabase.start", ns: "db11", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.124-0400 m30999| 2015-07-09T13:56:18.124-0400 I SHARDING [conn1] DBConfig::dropDatabase: db11 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.126-0400 m31200| 2015-07-09T13:56:18.125-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62867 #66 (62 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.127-0400 m31200| 2015-07-09T13:56:18.127-0400 I COMMAND [conn66] dropDatabase db11 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.127-0400 m31200| 2015-07-09T13:56:18.127-0400 I COMMAND [conn66] dropDatabase db11 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.128-0400 m30999| 2015-07-09T13:56:18.127-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.127-0400-559eb5c2ca4787b9985d1c03", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464578127), what: "dropDatabase", ns: "db11", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.128-0400 m31201| 2015-07-09T13:56:18.128-0400 I COMMAND [repl writer worker 9] dropDatabase db11 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.128-0400 m31201| 2015-07-09T13:56:18.128-0400 I COMMAND [repl writer worker 9] dropDatabase db11 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.128-0400 m31202| 2015-07-09T13:56:18.128-0400 I COMMAND [repl writer worker 8] dropDatabase db11 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.129-0400 m31202| 2015-07-09T13:56:18.128-0400 I COMMAND [repl writer worker 8] dropDatabase db11 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.218-0400 m31100| 2015-07-09T13:56:18.218-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.222-0400 m31101| 2015-07-09T13:56:18.222-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.222-0400 m31102| 2015-07-09T13:56:18.222-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.252-0400 m31200| 2015-07-09T13:56:18.252-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.255-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.255-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.255-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.255-0400 jstests/concurrency/fsm_workloads/remove_multiple_documents.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.256-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.256-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.256-0400 
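The create/drop of test.fsm_teardown on both shard primaries (applied on m31101/m31102 and m31201/m31202 by repl writer workers) appears to be the harness's per-workload cleanup barrier, confirming that writes still replicate on every shard before the next workload starts; that purpose is an assumption here, since the log only shows the drops. A comparable explicit barrier from the shell might look like:

    // Hypothetical replication barrier, similar in spirit to the harness's
    // test.fsm_teardown cycle: write with w: 3 so all replica set members
    // acknowledge, then drop the marker collection.
    db.getSiblingDB("test").fsm_teardown.insert({ barrier: 1 },
                                                { writeConcern: { w: 3 } });
    db.getSiblingDB("test").fsm_teardown.drop();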
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.256-0400 m31201| 2015-07-09T13:56:18.255-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.256-0400 m31202| 2015-07-09T13:56:18.255-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.263-0400 m30999| 2015-07-09T13:56:18.263-0400 I SHARDING [conn1] distributed lock 'db12/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c2ca4787b9985d1c04 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.267-0400 m30999| 2015-07-09T13:56:18.267-0400 I SHARDING [conn1] Placing [db12] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.267-0400 m30999| 2015-07-09T13:56:18.267-0400 I SHARDING [conn1] Enabling sharding for database [db12] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.321-0400 m30999| 2015-07-09T13:56:18.321-0400 I SHARDING [conn1] distributed lock 'db12/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.345-0400 m31200| 2015-07-09T13:56:18.344-0400 I INDEX [conn20] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.345-0400 m31200| 2015-07-09T13:56:18.345-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.355-0400 m31200| 2015-07-09T13:56:18.355-0400 I INDEX [conn20] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.357-0400 m30999| 2015-07-09T13:56:18.356-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db12.coll12", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.359-0400 m30999| 2015-07-09T13:56:18.359-0400 I SHARDING [conn1] distributed lock 'db12.coll12/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c2ca4787b9985d1c05 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.361-0400 m30999| 2015-07-09T13:56:18.360-0400 I SHARDING [conn1] enable sharding on: db12.coll12 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.362-0400 m30999| 2015-07-09T13:56:18.361-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.361-0400-559eb5c2ca4787b9985d1c06", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464578361), what: "shardCollection.start", ns: "db12.coll12", details: { shardKey: { _id: "hashed" }, collection: "db12.coll12", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.368-0400 m31201| 2015-07-09T13:56:18.367-0400 I INDEX [repl writer worker 1] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.368-0400 m31201| 2015-07-09T13:56:18.367-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.370-0400 m31202| 2015-07-09T13:56:18.369-0400 I INDEX [repl writer worker 9] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.371-0400 m31202| 
2015-07-09T13:56:18.370-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.371-0400 m31201| 2015-07-09T13:56:18.371-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.375-0400 m31202| 2015-07-09T13:56:18.375-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.413-0400 m30999| 2015-07-09T13:56:18.413-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db12.coll12 using new epoch 559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.520-0400 m30999| 2015-07-09T13:56:18.519-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 58 version: 1|1||559eb5c2ca4787b9985d1c07 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.576-0400 m30999| 2015-07-09T13:56:18.575-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 59 version: 1|1||559eb5c2ca4787b9985d1c07 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.577-0400 m31200| 2015-07-09T13:56:18.577-0400 I SHARDING [conn61] remotely refreshing metadata for db12.coll12 with requested shard version 1|1||559eb5c2ca4787b9985d1c07, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.579-0400 m31200| 2015-07-09T13:56:18.578-0400 I SHARDING [conn61] collection db12.coll12 was previously unsharded, new metadata loaded with shard version 1|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.579-0400 m31200| 2015-07-09T13:56:18.578-0400 I SHARDING [conn61] collection version was loaded at version 1|1||559eb5c2ca4787b9985d1c07, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.579-0400 m30999| 2015-07-09T13:56:18.579-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.579-0400-559eb5c2ca4787b9985d1c08", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464578579), what: "shardCollection", ns: "db12.coll12", details: { version: "1|1||559eb5c2ca4787b9985d1c07" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.634-0400 m30999| 2015-07-09T13:56:18.633-0400 I SHARDING [conn1] distributed lock 'db12.coll12/bs-osx108-8:30999:1436464534:16807' unlocked. 
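Setup for this workload repeats the fixture's standard pattern: db12 is placed on primary shard test-rs1, the { _id: "hashed" } index is built up front (the bulk index builds on m31201/m31202 are its replication), and shardCollection then creates two initial chunks under the fresh epoch 559eb5c2ca4787b9985d1c07, leaving the collection at version 1|1. The equivalent shell calls, as a sketch using the log's names:

    sh.enableSharding("db12");
    // Hashed shard key; the two initial chunks meet at hash value 0:
    // [MinKey, 0) and [0, MaxKey), both on the primary shard test-rs1.
    sh.shardCollection("db12.coll12", { _id: "hashed" });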
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.635-0400 m30999| 2015-07-09T13:56:18.634-0400 I SHARDING [conn1] moving chunk ns: db12.coll12 moving ( ns: db12.coll12, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.635-0400 m31200| 2015-07-09T13:56:18.635-0400 I SHARDING [conn64] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.636-0400 m31200| 2015-07-09T13:56:18.636-0400 I NETWORK [conn64] starting new replica set monitor for replica set test-rs1 with seeds bs-osx108-8:31200, bs-osx108-8:31201, bs-osx108-8:31202 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.637-0400 m31200| 2015-07-09T13:56:18.636-0400 I SHARDING [conn64] received moveChunk request: { moveChunk: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5c2ca4787b9985d1c07') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.640-0400 m31200| 2015-07-09T13:56:18.640-0400 I SHARDING [conn64] distributed lock 'db12.coll12/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c2d5a107a5b9c0daae [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.640-0400 m31200| 2015-07-09T13:56:18.640-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.640-0400-559eb5c2d5a107a5b9c0daaf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464578640), what: "moveChunk.start", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.694-0400 m31200| 2015-07-09T13:56:18.693-0400 I SHARDING [conn64] remotely refreshing metadata for db12.coll12 based on current shard version 1|1||559eb5c2ca4787b9985d1c07, current metadata version is 1|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.695-0400 m31200| 2015-07-09T13:56:18.695-0400 I SHARDING [conn64] metadata of collection db12.coll12 already up to date (shard version : 1|1||559eb5c2ca4787b9985d1c07, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.696-0400 m31200| 2015-07-09T13:56:18.695-0400 I SHARDING [conn64] moveChunk request accepted at version 1|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.696-0400 m31200| 2015-07-09T13:56:18.696-0400 I SHARDING [conn64] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.696-0400 m31100| 
2015-07-09T13:56:18.696-0400 I SHARDING [conn19] remotely refreshing metadata for db12.coll12, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.698-0400 m31100| 2015-07-09T13:56:18.697-0400 I SHARDING [conn19] collection db12.coll12 was previously unsharded, new metadata loaded with shard version 0|0||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.698-0400 m31100| 2015-07-09T13:56:18.698-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5c2ca4787b9985d1c07, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.698-0400 m31100| 2015-07-09T13:56:18.698-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db12.coll12 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.700-0400 m31200| 2015-07-09T13:56:18.700-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.704-0400 m31200| 2015-07-09T13:56:18.703-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.709-0400 m31200| 2015-07-09T13:56:18.708-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.711-0400 m31100| 2015-07-09T13:56:18.711-0400 I INDEX [migrateThread] build index on: db12.coll12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.711-0400 m31100| 2015-07-09T13:56:18.711-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.719-0400 m31200| 2015-07-09T13:56:18.718-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.720-0400 m31100| 2015-07-09T13:56:18.719-0400 I INDEX [migrateThread] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.720-0400 m31100| 2015-07-09T13:56:18.720-0400 I INDEX [migrateThread] building index using bulk method 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.732-0400 m31100| 2015-07-09T13:56:18.731-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.734-0400 m31100| 2015-07-09T13:56:18.732-0400 I SHARDING [migrateThread] Deleter starting delete for: db12.coll12 from { _id: MinKey } -> { _id: 0 }, with opId: 20644 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.734-0400 m31100| 2015-07-09T13:56:18.733-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db12.coll12 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.736-0400 m31200| 2015-07-09T13:56:18.736-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.740-0400 m31101| 2015-07-09T13:56:18.740-0400 I INDEX [repl writer worker 3] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.741-0400 m31101| 2015-07-09T13:56:18.740-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.748-0400 m31102| 2015-07-09T13:56:18.746-0400 I INDEX [repl writer worker 7] build index on: db12.coll12 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db12.coll12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.749-0400 m31102| 2015-07-09T13:56:18.746-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.749-0400 m31101| 2015-07-09T13:56:18.747-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.749-0400 m31100| 2015-07-09T13:56:18.748-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.749-0400 m31100| 2015-07-09T13:56:18.749-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db12.coll12' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.751-0400 m31102| 2015-07-09T13:56:18.751-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.770-0400 m31200| 2015-07-09T13:56:18.769-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.770-0400 m31200| 2015-07-09T13:56:18.769-0400 I SHARDING [conn64] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.770-0400 m31200| 2015-07-09T13:56:18.770-0400 I SHARDING [conn64] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.770-0400 m31200| 2015-07-09T13:56:18.770-0400 I SHARDING [conn64] moveChunk setting version to: 2|0||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.772-0400 m31100| 2015-07-09T13:56:18.771-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62868 #75 (69 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.785-0400 m31100| 2015-07-09T13:56:18.784-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db12.coll12' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.785-0400 m31100| 2015-07-09T13:56:18.784-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.784-0400-559eb5c2792e00bb6727491e", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464578784), what: "moveChunk.to", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 34, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 35, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.838-0400 m31200| 2015-07-09T13:56:18.837-0400 I SHARDING [conn64] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.838-0400 m31200| 2015-07-09T13:56:18.838-0400 I SHARDING [conn64] moveChunk updating self version to: 2|1||559eb5c2ca4787b9985d1c07 through { _id: 0 } -> { _id: MaxKey } for collection 'db12.coll12' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.840-0400 m31200| 2015-07-09T13:56:18.839-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.839-0400-559eb5c2d5a107a5b9c0dab0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464578839), what: "moveChunk.commit", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.893-0400 m31200| 2015-07-09T13:56:18.893-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.894-0400 m31200| 2015-07-09T13:56:18.893-0400 I SHARDING [conn64] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.894-0400 m31200| 
2015-07-09T13:56:18.893-0400 I SHARDING [conn64] Deleter starting delete for: db12.coll12 from { _id: MinKey } -> { _id: 0 }, with opId: 19349 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.894-0400 m31200| 2015-07-09T13:56:18.893-0400 I SHARDING [conn64] rangeDeleter deleted 0 documents for db12.coll12 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.894-0400 m31200| 2015-07-09T13:56:18.893-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.895-0400 m31200| 2015-07-09T13:56:18.894-0400 I SHARDING [conn64] distributed lock 'db12.coll12/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.895-0400 m31200| 2015-07-09T13:56:18.895-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.895-0400-559eb5c2d5a107a5b9c0dab1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464578895), what: "moveChunk.from", ns: "db12.coll12", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 123, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.949-0400 m31200| 2015-07-09T13:56:18.948-0400 I COMMAND [conn64] command db12.coll12 command: moveChunk { moveChunk: "db12.coll12", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5c2ca4787b9985d1c07') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 312ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.950-0400 m30999| 2015-07-09T13:56:18.950-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 60 version: 2|1||559eb5c2ca4787b9985d1c07 based on: 1|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.952-0400 m31100| 2015-07-09T13:56:18.951-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db12.coll12", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c2ca4787b9985d1c07') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.955-0400 m31100| 2015-07-09T13:56:18.955-0400 I SHARDING [conn15] distributed lock 'db12.coll12/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5c2792e00bb6727491f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.955-0400 m31100| 2015-07-09T13:56:18.955-0400 I SHARDING [conn15] remotely refreshing metadata for db12.coll12 based on current shard version 0|0||559eb5c2ca4787b9985d1c07, current metadata version is 1|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.957-0400 m31100| 2015-07-09T13:56:18.956-0400 I SHARDING [conn15] updating metadata for db12.coll12 from shard version 
0|0||559eb5c2ca4787b9985d1c07 to shard version 2|0||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.957-0400 m31100| 2015-07-09T13:56:18.956-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb5c2ca4787b9985d1c07, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.957-0400 m31100| 2015-07-09T13:56:18.956-0400 I SHARDING [conn15] splitChunk accepted at version 2|0||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:18.958-0400 m31100| 2015-07-09T13:56:18.957-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:18.957-0400-559eb5c2792e00bb67274920", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464578957), what: "split", ns: "db12.coll12", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5c2ca4787b9985d1c07') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5c2ca4787b9985d1c07') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.012-0400 m31100| 2015-07-09T13:56:19.011-0400 I SHARDING [conn15] distributed lock 'db12.coll12/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.014-0400 m30999| 2015-07-09T13:56:19.014-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 61 version: 2|3||559eb5c2ca4787b9985d1c07 based on: 2|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.015-0400 m31200| 2015-07-09T13:56:19.014-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db12.coll12", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c2ca4787b9985d1c07') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.018-0400 m31200| 2015-07-09T13:56:19.018-0400 I SHARDING [conn64] distributed lock 'db12.coll12/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c3d5a107a5b9c0dab2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.019-0400 m31200| 2015-07-09T13:56:19.018-0400 I SHARDING [conn64] remotely refreshing metadata for db12.coll12 based on current shard version 2|0||559eb5c2ca4787b9985d1c07, current metadata version is 2|0||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.020-0400 m31200| 2015-07-09T13:56:19.020-0400 I SHARDING [conn64] updating metadata for db12.coll12 from shard version 2|0||559eb5c2ca4787b9985d1c07 to shard version 2|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.021-0400 m31200| 2015-07-09T13:56:19.020-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559eb5c2ca4787b9985d1c07, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.021-0400 m31200| 2015-07-09T13:56:19.020-0400 I SHARDING [conn64] splitChunk accepted at version 2|1||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.022-0400 m31200| 2015-07-09T13:56:19.021-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:19.021-0400-559eb5c3d5a107a5b9c0dab3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464579021), what: "split", 
ns: "db12.coll12", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5c2ca4787b9985d1c07') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5c2ca4787b9985d1c07') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.076-0400 m31200| 2015-07-09T13:56:19.075-0400 I SHARDING [conn64] distributed lock 'db12.coll12/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.078-0400 m30999| 2015-07-09T13:56:19.077-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 62 version: 2|5||559eb5c2ca4787b9985d1c07 based on: 2|3||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.079-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.198-0400 m30999| 2015-07-09T13:56:19.198-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62870 #75 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.198-0400 m30998| 2015-07-09T13:56:19.197-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62869 #74 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.207-0400 m30999| 2015-07-09T13:56:19.207-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62872 #76 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.207-0400 m30998| 2015-07-09T13:56:19.207-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62871 #75 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.215-0400 m30998| 2015-07-09T13:56:19.214-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62873 #76 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.215-0400 m30999| 2015-07-09T13:56:19.214-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62875 #77 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.215-0400 m30998| 2015-07-09T13:56:19.214-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62874 #77 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.217-0400 m30999| 2015-07-09T13:56:19.217-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62876 #78 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.217-0400 m30999| 2015-07-09T13:56:19.217-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62877 #79 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.219-0400 m30998| 2015-07-09T13:56:19.219-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62878 #78 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.224-0400 setting random seed: 4807676509954 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.224-0400 setting random seed: 948031661100 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.224-0400 setting random seed: 6978402961976 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.225-0400 setting random seed: 496939364820 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.226-0400 setting random seed: 2059908430092 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.227-0400 setting random seed: 5224665198475 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.228-0400 setting random seed: 6018636557273 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.228-0400 setting random seed: 9654001761227 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.230-0400 setting random seed: 3568135537207 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.232-0400 setting random seed: 881000678054 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:19.240-0400 m30998| 2015-07-09T13:56:19.238-0400 I SHARDING [conn74] ChunkManager: time to load chunks for db12.coll12: 0ms sequenceNumber: 14 version: 2|5||559eb5c2ca4787b9985d1c07 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.233-0400 m30999| 2015-07-09T13:56:20.233-0400 I NETWORK [conn75] end connection 127.0.0.1:62870 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.259-0400 m30998| 2015-07-09T13:56:20.259-0400 I NETWORK [conn76] end connection 127.0.0.1:62873 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.286-0400 m30999| 2015-07-09T13:56:20.285-0400 I NETWORK [conn79] end connection 127.0.0.1:62877 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.297-0400 m30998| 2015-07-09T13:56:20.296-0400 I NETWORK [conn74] end connection 127.0.0.1:62869 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.313-0400 m30999| 2015-07-09T13:56:20.313-0400 I NETWORK [conn76] end connection 127.0.0.1:62872 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.322-0400 m30999| 2015-07-09T13:56:20.322-0400 I NETWORK [conn78] end connection 127.0.0.1:62876 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.339-0400 m30999| 2015-07-09T13:56:20.339-0400 I NETWORK [conn77] end connection 127.0.0.1:62875 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.356-0400 m30998| 2015-07-09T13:56:20.356-0400 I NETWORK [conn78] end connection 127.0.0.1:62878 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.363-0400 m30998| 2015-07-09T13:56:20.363-0400 I NETWORK [conn77] end connection 127.0.0.1:62874 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.377-0400 m30998| 2015-07-09T13:56:20.377-0400 I NETWORK [conn75] end connection 127.0.0.1:62871 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 jstests/concurrency/fsm_workloads/remove_multiple_documents.js: Workload completed in 1320 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.400-0400 m30999| 2015-07-09T13:56:20.400-0400 I COMMAND [conn1] DROP: db12.coll12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.401-0400 m30999| 2015-07-09T13:56:20.400-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:20.400-0400-559eb5c4ca4787b9985d1c09", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464580400), what: "dropCollection.start", ns: "db12.coll12", details: {} } 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.457-0400 m30999| 2015-07-09T13:56:20.456-0400 I SHARDING [conn1] distributed lock 'db12.coll12/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c4ca4787b9985d1c0a [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.458-0400 m31100| 2015-07-09T13:56:20.457-0400 I COMMAND [conn15] CMD: drop db12.coll12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.460-0400 m31200| 2015-07-09T13:56:20.459-0400 I COMMAND [conn63] CMD: drop db12.coll12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.462-0400 m31102| 2015-07-09T13:56:20.462-0400 I COMMAND [repl writer worker 15] CMD: drop db12.coll12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.462-0400 m31101| 2015-07-09T13:56:20.462-0400 I COMMAND [repl writer worker 15] CMD: drop db12.coll12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.464-0400 m31201| 2015-07-09T13:56:20.464-0400 I COMMAND [repl writer worker 14] CMD: drop db12.coll12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.464-0400 m31202| 2015-07-09T13:56:20.463-0400 I COMMAND [repl writer worker 10] CMD: drop db12.coll12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.516-0400 m31100| 2015-07-09T13:56:20.515-0400 I SHARDING [conn15] remotely refreshing metadata for db12.coll12 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5c2ca4787b9985d1c07, current metadata version is 2|3||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.517-0400 m31100| 2015-07-09T13:56:20.517-0400 W SHARDING [conn15] no chunks found when reloading db12.coll12, previous version was 0|0||559eb5c2ca4787b9985d1c07, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.517-0400 m31100| 2015-07-09T13:56:20.517-0400 I SHARDING [conn15] dropping metadata for db12.coll12 at shard version 2|3||559eb5c2ca4787b9985d1c07, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.519-0400 m31200| 2015-07-09T13:56:20.518-0400 I SHARDING [conn63] remotely refreshing metadata for db12.coll12 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5c2ca4787b9985d1c07, current metadata version is 2|5||559eb5c2ca4787b9985d1c07 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.520-0400 m31200| 2015-07-09T13:56:20.520-0400 W SHARDING [conn63] no chunks found when reloading db12.coll12, previous version was 0|0||559eb5c2ca4787b9985d1c07, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.520-0400 m31200| 2015-07-09T13:56:20.520-0400 I SHARDING [conn63] dropping metadata for db12.coll12 at shard version 2|5||559eb5c2ca4787b9985d1c07, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.521-0400 m30999| 2015-07-09T13:56:20.521-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:20.521-0400-559eb5c4ca4787b9985d1c0b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464580521), what: "dropCollection", ns: "db12.coll12", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.575-0400 m30999| 2015-07-09T13:56:20.574-0400 I SHARDING [conn1] distributed lock 'db12.coll12/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.630-0400 m30999| 2015-07-09T13:56:20.630-0400 I COMMAND [conn1] DROP DATABASE: db12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.631-0400 m30999| 2015-07-09T13:56:20.630-0400 I SHARDING [conn1] DBConfig::dropDatabase: db12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.631-0400 m30999| 2015-07-09T13:56:20.630-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:20.630-0400-559eb5c4ca4787b9985d1c0c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464580630), what: "dropDatabase.start", ns: "db12", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.736-0400 m30999| 2015-07-09T13:56:20.735-0400 I SHARDING [conn1] DBConfig::dropDatabase: db12 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.736-0400 m31200| 2015-07-09T13:56:20.736-0400 I COMMAND [conn66] dropDatabase db12 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.736-0400 m31200| 2015-07-09T13:56:20.736-0400 I COMMAND [conn66] dropDatabase db12 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.737-0400 m30999| 2015-07-09T13:56:20.736-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:20.736-0400-559eb5c4ca4787b9985d1c0d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464580736), what: "dropDatabase", ns: "db12", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.737-0400 m31202| 2015-07-09T13:56:20.737-0400 I COMMAND [repl writer worker 0] dropDatabase db12 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.737-0400 m31202| 2015-07-09T13:56:20.737-0400 I COMMAND [repl writer worker 0] dropDatabase db12 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.738-0400 m31201| 2015-07-09T13:56:20.737-0400 I COMMAND [repl writer worker 7] dropDatabase db12 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.738-0400 m31201| 2015-07-09T13:56:20.737-0400 I COMMAND [repl writer worker 7] dropDatabase db12 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.825-0400 m31100| 2015-07-09T13:56:20.825-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.829-0400 m31101| 2015-07-09T13:56:20.828-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.829-0400 m31102| 2015-07-09T13:56:20.829-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.860-0400 m31200| 2015-07-09T13:56:20.860-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.863-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.863-0400 jstests/concurrency/fsm_workloads/update_simple_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.863-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.864-0400 m31201| 2015-07-09T13:56:20.863-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:20.864-0400 m31202| 2015-07-09T13:56:20.863-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.868-0400 m30999| 2015-07-09T13:56:20.868-0400 I SHARDING [conn1] distributed lock 'db13/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c4ca4787b9985d1c0e [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.872-0400 m30999| 2015-07-09T13:56:20.871-0400 I SHARDING [conn1] Placing [db13] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.872-0400 m30999| 2015-07-09T13:56:20.871-0400 I SHARDING [conn1] Enabling sharding for database [db13] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.926-0400 m30999| 2015-07-09T13:56:20.925-0400 I SHARDING [conn1] distributed lock 'db13/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.946-0400 m31200| 2015-07-09T13:56:20.946-0400 I INDEX [conn23] build index on: db13.coll13 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.947-0400 m31200| 2015-07-09T13:56:20.946-0400 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.956-0400 m31200| 2015-07-09T13:56:20.955-0400 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.957-0400 m30999| 2015-07-09T13:56:20.956-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db13.coll13", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.961-0400 m30999| 2015-07-09T13:56:20.959-0400 I SHARDING [conn1] distributed lock 'db13.coll13/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c4ca4787b9985d1c0f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.961-0400 m30999| 2015-07-09T13:56:20.960-0400 I SHARDING [conn1] enable sharding on: db13.coll13 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.962-0400 m30999| 2015-07-09T13:56:20.960-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:20.960-0400-559eb5c4ca4787b9985d1c10", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464580960), what: "shardCollection.start", ns: "db13.coll13", details: { shardKey: { _id: "hashed" }, collection: "db13.coll13", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.968-0400 m31202| 2015-07-09T13:56:20.968-0400 I INDEX [repl writer worker 7] build index on: db13.coll13 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.968-0400 m31202| 2015-07-09T13:56:20.968-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.972-0400 m31201| 2015-07-09T13:56:20.972-0400 I INDEX [repl writer worker 2] build index on: db13.coll13 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.973-0400 m31201| 2015-07-09T13:56:20.972-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.974-0400 m31202| 2015-07-09T13:56:20.974-0400 I INDEX [repl writer worker 
7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:20.977-0400 m31201| 2015-07-09T13:56:20.977-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.014-0400 m30999| 2015-07-09T13:56:21.013-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db13.coll13 using new epoch 559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.121-0400 m30999| 2015-07-09T13:56:21.120-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 63 version: 1|1||559eb5c5ca4787b9985d1c11 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.177-0400 m30999| 2015-07-09T13:56:21.176-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db13.coll13: 1ms sequenceNumber: 64 version: 1|1||559eb5c5ca4787b9985d1c11 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.178-0400 m31200| 2015-07-09T13:56:21.178-0400 I SHARDING [conn31] remotely refreshing metadata for db13.coll13 with requested shard version 1|1||559eb5c5ca4787b9985d1c11, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.180-0400 m31200| 2015-07-09T13:56:21.180-0400 I SHARDING [conn31] collection db13.coll13 was previously unsharded, new metadata loaded with shard version 1|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.180-0400 m31200| 2015-07-09T13:56:21.180-0400 I SHARDING [conn31] collection version was loaded at version 1|1||559eb5c5ca4787b9985d1c11, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.181-0400 m30999| 2015-07-09T13:56:21.180-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:21.180-0400-559eb5c5ca4787b9985d1c12", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464581180), what: "shardCollection", ns: "db13.coll13", details: { version: "1|1||559eb5c5ca4787b9985d1c11" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.235-0400 m30999| 2015-07-09T13:56:21.234-0400 I SHARDING [conn1] distributed lock 'db13.coll13/bs-osx108-8:30999:1436464534:16807' unlocked. 
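
The sequence above — "Placing [db13] on: test-rs1", "Enabling sharding for database [db13]", the _id_hashed index builds, and "going to create 2 chunk(s)" — is the standard per-workload setup: the harness shards each workload's collection on a hashed _id, which pre-splits it into evenly sized chunks. A sketch of the equivalent shell commands (db13/coll13 are just the per-workload names from this log):

    var admin = db.getSiblingDB("admin");
    admin.runCommand({enableSharding: "db13"});        // "Enabling sharding for database [db13]"
    admin.runCommand({shardCollection: "db13.coll13",  // builds the {_id: "hashed"} index and
                      key: {_id: "hashed"}});          // creates the initial chunks
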
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.236-0400 m30999| 2015-07-09T13:56:21.235-0400 I SHARDING [conn1] moving chunk ns: db13.coll13 moving ( ns: db13.coll13, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.236-0400 m31200| 2015-07-09T13:56:21.236-0400 I SHARDING [conn63] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.237-0400 m31200| 2015-07-09T13:56:21.237-0400 I SHARDING [conn63] received moveChunk request: { moveChunk: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5c5ca4787b9985d1c11') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.241-0400 m31200| 2015-07-09T13:56:21.240-0400 I SHARDING [conn63] distributed lock 'db13.coll13/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c5d5a107a5b9c0dab5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.241-0400 m31200| 2015-07-09T13:56:21.241-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:21.241-0400-559eb5c5d5a107a5b9c0dab6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464581241), what: "moveChunk.start", ns: "db13.coll13", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.294-0400 m31200| 2015-07-09T13:56:21.294-0400 I SHARDING [conn63] remotely refreshing metadata for db13.coll13 based on current shard version 1|1||559eb5c5ca4787b9985d1c11, current metadata version is 1|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.296-0400 m31200| 2015-07-09T13:56:21.295-0400 I SHARDING [conn63] metadata of collection db13.coll13 already up to date (shard version : 1|1||559eb5c5ca4787b9985d1c11, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.296-0400 m31200| 2015-07-09T13:56:21.295-0400 I SHARDING [conn63] moveChunk request accepted at version 1|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.296-0400 m31200| 2015-07-09T13:56:21.296-0400 I SHARDING [conn63] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.297-0400 m31100| 2015-07-09T13:56:21.296-0400 I SHARDING [conn19] remotely refreshing metadata for db13.coll13, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.298-0400 m31100| 2015-07-09T13:56:21.298-0400 I SHARDING [conn19] collection db13.coll13 was previously unsharded, new metadata loaded with shard version 0|0||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.298-0400 m31100| 2015-07-09T13:56:21.298-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5c5ca4787b9985d1c11, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.299-0400 m31100| 2015-07-09T13:56:21.298-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for 
collection db13.coll13 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.301-0400 m31200| 2015-07-09T13:56:21.300-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.304-0400 m31200| 2015-07-09T13:56:21.303-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.309-0400 m31200| 2015-07-09T13:56:21.309-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.312-0400 m31100| 2015-07-09T13:56:21.312-0400 I INDEX [migrateThread] build index on: db13.coll13 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.313-0400 m31100| 2015-07-09T13:56:21.312-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.318-0400 m31200| 2015-07-09T13:56:21.318-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.321-0400 m31100| 2015-07-09T13:56:21.320-0400 I INDEX [migrateThread] build index on: db13.coll13 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.322-0400 m31100| 2015-07-09T13:56:21.321-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.332-0400 m31100| 2015-07-09T13:56:21.332-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.333-0400 m31100| 2015-07-09T13:56:21.333-0400 I SHARDING [migrateThread] Deleter starting delete for: db13.coll13 from { _id: MinKey } -> { _id: 0 }, with opId: 22690 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.334-0400 m31100| 2015-07-09T13:56:21.334-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db13.coll13 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.335-0400 m31200| 2015-07-09T13:56:21.335-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.340-0400 m31102| 2015-07-09T13:56:21.340-0400 I INDEX [repl writer worker 0] build index on: db13.coll13 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.340-0400 m31102| 2015-07-09T13:56:21.340-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.341-0400 m31101| 2015-07-09T13:56:21.340-0400 I INDEX [repl writer worker 2] build index on: db13.coll13 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.341-0400 m31101| 2015-07-09T13:56:21.340-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.346-0400 m31102| 2015-07-09T13:56:21.346-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.348-0400 m31100| 2015-07-09T13:56:21.348-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.349-0400 m31100| 2015-07-09T13:56:21.348-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db13.coll13' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.349-0400 m31101| 2015-07-09T13:56:21.348-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.369-0400 m31200| 2015-07-09T13:56:21.368-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.369-0400 m31200| 2015-07-09T13:56:21.368-0400 I SHARDING [conn63] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.369-0400 m31200| 2015-07-09T13:56:21.369-0400 I SHARDING [conn63] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.369-0400 m31200| 2015-07-09T13:56:21.369-0400 I SHARDING [conn63] moveChunk setting version to: 2|0||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.372-0400 m31100| 2015-07-09T13:56:21.372-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db13.coll13' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.373-0400 m31100| 2015-07-09T13:56:21.372-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:21.372-0400-559eb5c5792e00bb67274921", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464581372), what: "moveChunk.to", ns: "db13.coll13", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 34, step 2 of 5: 14, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.425-0400 m31200| 2015-07-09T13:56:21.425-0400 I SHARDING [conn63] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.425-0400 m31200| 2015-07-09T13:56:21.425-0400 I SHARDING [conn63] moveChunk updating self version to: 2|1||559eb5c5ca4787b9985d1c11 through { _id: 0 } -> { _id: MaxKey } for collection 'db13.coll13' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.426-0400 m31200| 2015-07-09T13:56:21.426-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:21.426-0400-559eb5c5d5a107a5b9c0dab7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464581426), what: "moveChunk.commit", ns: "db13.coll13", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.480-0400 m31200| 2015-07-09T13:56:21.479-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.480-0400 m31200| 2015-07-09T13:56:21.480-0400 I SHARDING [conn63] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.480-0400 m31200| 2015-07-09T13:56:21.480-0400 I SHARDING [conn63] Deleter starting delete for: db13.coll13 from { _id: MinKey } -> { _id: 0 }, with opId: 21466 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:21.481-0400 m31200| 2015-07-09T13:56:21.480-0400 I SHARDING [conn63] rangeDeleter deleted 0 documents for db13.coll13 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.481-0400 m31200| 2015-07-09T13:56:21.480-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.481-0400 m31200| 2015-07-09T13:56:21.481-0400 I SHARDING [conn63] distributed lock 'db13.coll13/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.482-0400 m31200| 2015-07-09T13:56:21.481-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:21.481-0400-559eb5c5d5a107a5b9c0dab8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464581481), what: "moveChunk.from", ns: "db13.coll13", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 69, step 5 of 6: 111, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.534-0400 m31200| 2015-07-09T13:56:21.533-0400 I COMMAND [conn63] command db13.coll13 command: moveChunk { moveChunk: "db13.coll13", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5c5ca4787b9985d1c11') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 297ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.536-0400 m30999| 2015-07-09T13:56:21.535-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 65 version: 2|1||559eb5c5ca4787b9985d1c11 based on: 1|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.537-0400 m31100| 2015-07-09T13:56:21.537-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c5ca4787b9985d1c11') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.541-0400 m31100| 2015-07-09T13:56:21.541-0400 I SHARDING [conn15] distributed lock 'db13.coll13/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5c5792e00bb67274922 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.541-0400 m31100| 2015-07-09T13:56:21.541-0400 I SHARDING [conn15] remotely refreshing metadata for db13.coll13 based on current shard version 0|0||559eb5c5ca4787b9985d1c11, current metadata version is 1|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.542-0400 m31100| 2015-07-09T13:56:21.542-0400 I SHARDING [conn15] updating metadata for db13.coll13 from shard version 0|0||559eb5c5ca4787b9985d1c11 to shard version 2|0||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.543-0400 m31100| 2015-07-09T13:56:21.542-0400 I 
SHARDING [conn15] collection version was loaded at version 2|1||559eb5c5ca4787b9985d1c11, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.543-0400 m31100| 2015-07-09T13:56:21.542-0400 I SHARDING [conn15] splitChunk accepted at version 2|0||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.544-0400 m31100| 2015-07-09T13:56:21.543-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:21.543-0400-559eb5c5792e00bb67274923", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464581543), what: "split", ns: "db13.coll13", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5c5ca4787b9985d1c11') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5c5ca4787b9985d1c11') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.597-0400 m31100| 2015-07-09T13:56:21.597-0400 I SHARDING [conn15] distributed lock 'db13.coll13/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.599-0400 m30999| 2015-07-09T13:56:21.599-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 66 version: 2|3||559eb5c5ca4787b9985d1c11 based on: 2|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.600-0400 m31200| 2015-07-09T13:56:21.600-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db13.coll13", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c5ca4787b9985d1c11') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.604-0400 m31200| 2015-07-09T13:56:21.604-0400 I SHARDING [conn63] distributed lock 'db13.coll13/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c5d5a107a5b9c0dab9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.604-0400 m31200| 2015-07-09T13:56:21.604-0400 I SHARDING [conn63] remotely refreshing metadata for db13.coll13 based on current shard version 2|0||559eb5c5ca4787b9985d1c11, current metadata version is 2|0||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.605-0400 m31200| 2015-07-09T13:56:21.605-0400 I SHARDING [conn63] updating metadata for db13.coll13 from shard version 2|0||559eb5c5ca4787b9985d1c11 to shard version 2|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.606-0400 m31200| 2015-07-09T13:56:21.605-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eb5c5ca4787b9985d1c11, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.606-0400 m31200| 2015-07-09T13:56:21.605-0400 I SHARDING [conn63] splitChunk accepted at version 2|1||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.607-0400 m31200| 2015-07-09T13:56:21.607-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:21.606-0400-559eb5c5d5a107a5b9c0daba", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464581607), what: "split", ns: "db13.coll13", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb5c5ca4787b9985d1c11') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5c5ca4787b9985d1c11') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.661-0400 m31200| 2015-07-09T13:56:21.661-0400 I SHARDING [conn63] distributed lock 'db13.coll13/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.663-0400 m30999| 2015-07-09T13:56:21.663-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 67 version: 2|5||559eb5c5ca4787b9985d1c11 based on: 2|3||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.666-0400 m30999| 2015-07-09T13:56:21.665-0400 I SHARDING [conn1] sharded connection to test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.666-0400 m30999| 2015-07-09T13:56:21.665-0400 I SHARDING [conn1] retrying command: { listIndexes: "coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.666-0400 m31200| 2015-07-09T13:56:21.665-0400 I NETWORK [conn31] end connection 127.0.0.1:62687 (61 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.678-0400 m31100| 2015-07-09T13:56:21.677-0400 I INDEX [conn51] build index on: db13.coll13 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.678-0400 m31200| 2015-07-09T13:56:21.677-0400 I INDEX [conn61] build index on: db13.coll13 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.679-0400 m31100| 2015-07-09T13:56:21.678-0400 I INDEX [conn51] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.679-0400 m31200| 2015-07-09T13:56:21.678-0400 I INDEX [conn61] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.681-0400 m31100| 2015-07-09T13:56:21.680-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.684-0400 m31200| 2015-07-09T13:56:21.683-0400 I INDEX [conn61] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.686-0400 m31101| 2015-07-09T13:56:21.685-0400 I INDEX [repl writer worker 0] build index on: db13.coll13 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.686-0400 m31101| 2015-07-09T13:56:21.685-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.693-0400 m31101| 2015-07-09T13:56:21.692-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.694-0400 m31201| 2015-07-09T13:56:21.693-0400 I INDEX [repl writer worker 1] build index on: db13.coll13 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.694-0400 m31201| 2015-07-09T13:56:21.693-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.699-0400 m31200| 2015-07-09T13:56:21.698-0400 I COMMAND [conn63] CMD: dropIndexes db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.699-0400 m31100| 2015-07-09T13:56:21.698-0400 I COMMAND [conn15] CMD: dropIndexes db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.700-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.703-0400 m31202| 2015-07-09T13:56:21.702-0400 I INDEX [repl writer worker 11] build index on: db13.coll13 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.703-0400 m31202| 2015-07-09T13:56:21.702-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.740-0400 m31102| 2015-07-09T13:56:21.705-0400 I INDEX [repl writer worker 5] build index on: db13.coll13 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db13.coll13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.740-0400 m31102| 2015-07-09T13:56:21.705-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.786-0400 m31101| 2015-07-09T13:56:21.762-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.786-0400 m31201| 2015-07-09T13:56:21.771-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.831-0400 m30998| 2015-07-09T13:56:21.821-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62879 #79 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.831-0400 m31102| 2015-07-09T13:56:21.821-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.843-0400 m31201| 2015-07-09T13:56:21.839-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.888-0400 m31102| 2015-07-09T13:56:21.888-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.893-0400 m31202| 2015-07-09T13:56:21.891-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.893-0400 m30998| 2015-07-09T13:56:21.892-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62880 #80 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.935-0400 m30998| 2015-07-09T13:56:21.934-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62881 #81 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.938-0400 m31202| 2015-07-09T13:56:21.938-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.939-0400 m30998| 2015-07-09T13:56:21.938-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62882 #82 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.939-0400 m30999| 2015-07-09T13:56:21.939-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62883 #80 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.942-0400 m30999| 2015-07-09T13:56:21.942-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62884 #81 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.946-0400 m30998| 2015-07-09T13:56:21.946-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62885 #83 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.946-0400 m30999| 2015-07-09T13:56:21.946-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62886 #82 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.956-0400 m30998| 2015-07-09T13:56:21.956-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62888 #84 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.956-0400 m30999| 2015-07-09T13:56:21.956-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62887 #83 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.957-0400 m30999| 2015-07-09T13:56:21.957-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62889 #84 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.958-0400 m30999| 2015-07-09T13:56:21.957-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62891 #85 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.959-0400 m30998| 2015-07-09T13:56:21.959-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62890 #85 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.959-0400 m30998| 2015-07-09T13:56:21.959-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62892 #86 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.960-0400 m30999| 2015-07-09T13:56:21.960-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62894 #86 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.963-0400 m30998| 2015-07-09T13:56:21.963-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62893 #87 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.964-0400 m30999| 2015-07-09T13:56:21.963-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62895 #87 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.964-0400 m30998| 2015-07-09T13:56:21.964-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62896 #88 (11 connections now open) 
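
This burst of "connection accepted" lines on both routers (m30998 and m30999) is the worker startup behind "Using 20 threads (requested 20)": each FSM worker runs in its own shell thread with its own connection, spread across the two mongos processes, and the "setting random seed" lines that follow are printed by Random.setRandomSeed() in each worker. A minimal sketch of that pattern, assuming the shell's ScopedThread helper from jstests/libs/parallelTester.js:

    load("jstests/libs/parallelTester.js");
    var threads = [];
    for (var i = 0; i < 20; ++i) {
        threads.push(new ScopedThread(function(host, seed) {
            Random.setRandomSeed(seed);        // prints "setting random seed: ..."
            var conn = new Mongo(host);        // one "connection accepted" per worker
            return conn.getDB("db13").coll13.count();
        }, db.getMongo().host, i));
    }
    threads.forEach(function(t) { t.start(); });
    threads.forEach(function(t) { t.join(); });
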
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.964-0400 m30999| 2015-07-09T13:56:21.964-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62897 #88 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.966-0400 m30999| 2015-07-09T13:56:21.966-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62898 #89 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.985-0400 setting random seed: 373323033563 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.985-0400 setting random seed: 8560672164894 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.985-0400 setting random seed: 6673919237218 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.986-0400 setting random seed: 4465203420259 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.986-0400 setting random seed: 151018188335 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.986-0400 setting random seed: 9893108224496 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.986-0400 setting random seed: 9217142378911 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.988-0400 setting random seed: 9763845801353 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.988-0400 setting random seed: 3470006533898 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.990-0400 setting random seed: 7946954551152 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.991-0400 setting random seed: 4211202077567 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.992-0400 m30998| 2015-07-09T13:56:21.991-0400 I SHARDING [conn79] ChunkManager: time to load chunks for db13.coll13: 0ms sequenceNumber: 15 version: 2|5||559eb5c5ca4787b9985d1c11 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.996-0400 setting random seed: 5219236300326 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.996-0400 setting random seed: 956459762528 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.997-0400 setting random seed: 8162692524492 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.999-0400 setting random seed: 6912473784759 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:21.999-0400 setting random seed: 399842387996 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.000-0400 setting random seed: 9704540437087 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.005-0400 setting random seed: 4440228026360 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.010-0400 setting random seed: 2512034499086 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.017-0400 setting random seed: 2619140422903 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.032-0400 m31200| 2015-07-09T13:56:22.030-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62899 #67 (62 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.065-0400 m31200| 2015-07-09T13:56:22.065-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62900 #68 (63 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.102-0400 m31200| 2015-07-09T13:56:22.102-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62901 #69 (64 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.111-0400 m31200| 2015-07-09T13:56:22.111-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62902 #70 (65 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:22.114-0400 m31200| 2015-07-09T13:56:22.113-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62903 #71 (66 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.120-0400 m31200| 2015-07-09T13:56:22.120-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62904 #72 (67 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.170-0400 m30999| 2015-07-09T13:56:22.170-0400 I NETWORK [conn81] end connection 127.0.0.1:62884 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.204-0400 m30998| 2015-07-09T13:56:22.203-0400 I NETWORK [conn84] end connection 127.0.0.1:62888 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.213-0400 m30999| 2015-07-09T13:56:22.212-0400 I NETWORK [conn88] end connection 127.0.0.1:62897 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.221-0400 m30999| 2015-07-09T13:56:22.221-0400 I NETWORK [conn83] end connection 127.0.0.1:62887 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.233-0400 m30998| 2015-07-09T13:56:22.227-0400 I NETWORK [conn83] end connection 127.0.0.1:62885 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.250-0400 m30998| 2015-07-09T13:56:22.249-0400 I NETWORK [conn80] end connection 127.0.0.1:62880 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.251-0400 m30999| 2015-07-09T13:56:22.250-0400 I NETWORK [conn86] end connection 127.0.0.1:62894 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.255-0400 m30998| 2015-07-09T13:56:22.255-0400 I NETWORK [conn86] end connection 127.0.0.1:62892 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.259-0400 m30998| 2015-07-09T13:56:22.258-0400 I NETWORK [conn87] end connection 127.0.0.1:62893 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.268-0400 m30998| 2015-07-09T13:56:22.262-0400 I NETWORK [conn82] end connection 127.0.0.1:62882 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.287-0400 m30999| 2015-07-09T13:56:22.285-0400 I NETWORK [conn85] end connection 127.0.0.1:62891 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.301-0400 m30998| 2015-07-09T13:56:22.298-0400 I NETWORK [conn79] end connection 127.0.0.1:62879 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.305-0400 m30998| 2015-07-09T13:56:22.304-0400 I NETWORK [conn81] end connection 127.0.0.1:62881 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.308-0400 m30999| 2015-07-09T13:56:22.308-0400 I NETWORK [conn87] end connection 127.0.0.1:62895 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.312-0400 m30999| 2015-07-09T13:56:22.312-0400 I NETWORK [conn80] end connection 127.0.0.1:62883 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.344-0400 m30999| 2015-07-09T13:56:22.344-0400 I NETWORK [conn89] end connection 127.0.0.1:62898 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.349-0400 m30999| 2015-07-09T13:56:22.347-0400 I NETWORK [conn82] end connection 127.0.0.1:62886 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.352-0400 m30998| 2015-07-09T13:56:22.351-0400 I NETWORK [conn85] end connection 
127.0.0.1:62890 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.361-0400 m30999| 2015-07-09T13:56:22.361-0400 I NETWORK [conn84] end connection 127.0.0.1:62889 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.380-0400 m30998| 2015-07-09T13:56:22.380-0400 I NETWORK [conn88] end connection 127.0.0.1:62896 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 jstests/concurrency/fsm_workloads/update_simple_noindex.js: Workload completed in 708 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.408-0400 m30999| 2015-07-09T13:56:22.408-0400 I COMMAND [conn1] DROP: db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.409-0400 m30999| 2015-07-09T13:56:22.408-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:22.408-0400-559eb5c6ca4787b9985d1c13", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464582408), what: "dropCollection.start", ns: "db13.coll13", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.465-0400 m30999| 2015-07-09T13:56:22.465-0400 I SHARDING [conn1] distributed lock 'db13.coll13/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c6ca4787b9985d1c14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.467-0400 m31100| 2015-07-09T13:56:22.466-0400 I COMMAND [conn15] CMD: drop db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.469-0400 m31200| 2015-07-09T13:56:22.469-0400 I COMMAND [conn63] CMD: drop db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.470-0400 m31102| 2015-07-09T13:56:22.470-0400 I COMMAND [repl writer worker 13] CMD: drop db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.471-0400 m31101| 2015-07-09T13:56:22.470-0400 I COMMAND [repl writer worker 15] CMD: drop db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.473-0400 m31202| 2015-07-09T13:56:22.473-0400 I COMMAND [repl writer worker 1] CMD: drop db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.473-0400 m31201| 2015-07-09T13:56:22.473-0400 I COMMAND [repl writer worker 3] CMD: drop db13.coll13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.526-0400 m31100| 2015-07-09T13:56:22.525-0400 I SHARDING [conn15] remotely refreshing metadata for db13.coll13 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5c5ca4787b9985d1c11, current metadata version is 2|3||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.527-0400 m31100| 2015-07-09T13:56:22.527-0400 W SHARDING [conn15] no chunks found when reloading db13.coll13, previous version was 0|0||559eb5c5ca4787b9985d1c11, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.528-0400 m31100| 2015-07-09T13:56:22.527-0400 I SHARDING [conn15] dropping metadata for db13.coll13 at shard version 2|3||559eb5c5ca4787b9985d1c11, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.529-0400 m31200| 2015-07-09T13:56:22.528-0400 I SHARDING [conn63] 
remotely refreshing metadata for db13.coll13 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5c5ca4787b9985d1c11, current metadata version is 2|5||559eb5c5ca4787b9985d1c11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.530-0400 m31200| 2015-07-09T13:56:22.530-0400 W SHARDING [conn63] no chunks found when reloading db13.coll13, previous version was 0|0||559eb5c5ca4787b9985d1c11, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.530-0400 m31200| 2015-07-09T13:56:22.530-0400 I SHARDING [conn63] dropping metadata for db13.coll13 at shard version 2|5||559eb5c5ca4787b9985d1c11, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.531-0400 m30999| 2015-07-09T13:56:22.531-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:22.531-0400-559eb5c6ca4787b9985d1c15", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464582531), what: "dropCollection", ns: "db13.coll13", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.584-0400 m30999| 2015-07-09T13:56:22.584-0400 I SHARDING [conn1] distributed lock 'db13.coll13/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.640-0400 m30999| 2015-07-09T13:56:22.640-0400 I COMMAND [conn1] DROP DATABASE: db13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.640-0400 m30999| 2015-07-09T13:56:22.640-0400 I SHARDING [conn1] DBConfig::dropDatabase: db13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.641-0400 m30999| 2015-07-09T13:56:22.640-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:22.640-0400-559eb5c6ca4787b9985d1c16", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464582640), what: "dropDatabase.start", ns: "db13", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.745-0400 m30999| 2015-07-09T13:56:22.745-0400 I SHARDING [conn1] DBConfig::dropDatabase: db13 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.746-0400 m31200| 2015-07-09T13:56:22.745-0400 I COMMAND [conn66] dropDatabase db13 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.746-0400 m31200| 2015-07-09T13:56:22.746-0400 I COMMAND [conn66] dropDatabase db13 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.746-0400 m30999| 2015-07-09T13:56:22.746-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:22.746-0400-559eb5c6ca4787b9985d1c17", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464582746), what: "dropDatabase", ns: "db13", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.747-0400 m31202| 2015-07-09T13:56:22.747-0400 I COMMAND [repl writer worker 11] dropDatabase db13 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.747-0400 m31201| 2015-07-09T13:56:22.747-0400 I COMMAND [repl writer worker 14] dropDatabase db13 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.747-0400 m31202| 2015-07-09T13:56:22.747-0400 I COMMAND [repl writer worker 11] dropDatabase db13 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.747-0400 m31201| 2015-07-09T13:56:22.747-0400 I COMMAND [repl writer worker 14] dropDatabase db13 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.844-0400 m31100| 2015-07-09T13:56:22.843-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown 
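
The teardown above happens in two layers: the sharded workload database (db13) is dropped through mongos, which logs the dropDatabase.start/dropDatabase changelog events and issues dropDatabase to the primary shard, and each replica set's secondaries then replay the drop via their repl writer workers. From the shell the cleanup reduces to (a sketch):

    var res = db.getSiblingDB("db13").dropDatabase();  // mongos: "DROP DATABASE: db13"
    assert.commandWorked(res);                         // shard logs "dropDatabase db13 finished";
                                                       // secondaries apply it via repl writers
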
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.847-0400 m31101| 2015-07-09T13:56:22.847-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.848-0400 m31102| 2015-07-09T13:56:22.847-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.883-0400 m31200| 2015-07-09T13:56:22.882-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.885-0400 m31201| 2015-07-09T13:56:22.885-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 m31202| 2015-07-09T13:56:22.885-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 jstests/concurrency/fsm_workloads/update_rename_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.886-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.894-0400 m30999| 2015-07-09T13:56:22.893-0400 I SHARDING [conn1] distributed lock 'db14/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c6ca4787b9985d1c18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.897-0400 m30999| 2015-07-09T13:56:22.897-0400 I SHARDING [conn1] Placing [db14] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.897-0400 m30999| 2015-07-09T13:56:22.897-0400 I SHARDING [conn1] Enabling sharding for database [db14] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.952-0400 m30999| 2015-07-09T13:56:22.951-0400 I SHARDING [conn1] distributed lock 'db14/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.972-0400 m31200| 2015-07-09T13:56:22.972-0400 I INDEX [conn59] build index on: db14.coll14 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.973-0400 m31200| 2015-07-09T13:56:22.972-0400 I INDEX [conn59] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.984-0400 m31200| 2015-07-09T13:56:22.983-0400 I INDEX [conn59] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.985-0400 m30999| 2015-07-09T13:56:22.985-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db14.coll14", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.988-0400 m30999| 2015-07-09T13:56:22.988-0400 I SHARDING [conn1] distributed lock 'db14.coll14/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5c6ca4787b9985d1c19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.989-0400 m30999| 2015-07-09T13:56:22.989-0400 I SHARDING [conn1] enable sharding on: db14.coll14 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.990-0400 m30999| 2015-07-09T13:56:22.989-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:22.989-0400-559eb5c6ca4787b9985d1c1a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464582989), what: "shardCollection.start", ns: "db14.coll14", details: { shardKey: { _id: "hashed" }, collection: "db14.coll14", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.995-0400 m31202| 2015-07-09T13:56:22.995-0400 I INDEX [repl writer worker 14] build index on: db14.coll14 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.995-0400 m31202| 2015-07-09T13:56:22.995-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.997-0400 m31201| 2015-07-09T13:56:22.996-0400 I INDEX [repl writer worker 0] build index on: db14.coll14 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:22.997-0400 m31201| 2015-07-09T13:56:22.997-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.000-0400 m31201| 2015-07-09T13:56:22.999-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.001-0400 m31202| 2015-07-09T13:56:23.001-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.042-0400 m30999| 2015-07-09T13:56:23.042-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db14.coll14 using new epoch 559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.150-0400 m30999| 2015-07-09T13:56:23.149-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 68 version: 1|1||559eb5c7ca4787b9985d1c1b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.205-0400 m30999| 2015-07-09T13:56:23.204-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 69 version: 1|1||559eb5c7ca4787b9985d1c1b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.206-0400 m31200| 2015-07-09T13:56:23.206-0400 I SHARDING [conn61] remotely refreshing metadata for db14.coll14 with requested shard version 1|1||559eb5c7ca4787b9985d1c1b, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.208-0400 m31200| 2015-07-09T13:56:23.207-0400 I SHARDING [conn61] collection db14.coll14 was previously unsharded, new metadata loaded with shard version 1|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.208-0400 m31200| 2015-07-09T13:56:23.208-0400 I SHARDING [conn61] collection version was loaded at version 1|1||559eb5c7ca4787b9985d1c1b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.208-0400 m30999| 2015-07-09T13:56:23.208-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:23.208-0400-559eb5c7ca4787b9985d1c1c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464583208), what: "shardCollection", ns: "db14.coll14", details: { version: "1|1||559eb5c7ca4787b9985d1c1b" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.263-0400 m30999| 2015-07-09T13:56:23.263-0400 I SHARDING [conn1] distributed lock 'db14.coll14/bs-osx108-8:30999:1436464534:16807' unlocked. 
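
What follows for db14.coll14 repeats the chunk layout already performed for db13 above: mongos moves the lower chunk ({ _id: MinKey } -> { _id: 0 }) from the primary shard test-rs1 to test-rs0 with waitForDelete, then each shard splits its remaining chunk at ±4611686018427387902 — approximately ±2^62 (2^62 = 4611686018427387904), the quarter marks of the signed 64-bit range [-2^63, 2^63) that hashed shard keys map into — leaving four roughly equal chunks, two per shard. A sketch of the equivalent commands (using bounds rather than find, since the key is hashed):

    var admin = db.getSiblingDB("admin");
    admin.runCommand({moveChunk: "db14.coll14",
                      bounds: [{_id: MinKey}, {_id: NumberLong(0)}],
                      to: "test-rs0",
                      _waitForDelete: true});          // wait for the donor's range deleter
    sh.splitAt("db14.coll14", {_id: NumberLong("-4611686018427387902")});
    sh.splitAt("db14.coll14", {_id: NumberLong("4611686018427387902")});
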
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.264-0400 m30999| 2015-07-09T13:56:23.264-0400 I SHARDING [conn1] moving chunk ns: db14.coll14 moving ( ns: db14.coll14, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.264-0400 m31200| 2015-07-09T13:56:23.264-0400 I SHARDING [conn63] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.266-0400 m31200| 2015-07-09T13:56:23.265-0400 I SHARDING [conn63] received moveChunk request: { moveChunk: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5c7ca4787b9985d1c1b') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.268-0400 m31200| 2015-07-09T13:56:23.268-0400 I SHARDING [conn63] distributed lock 'db14.coll14/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c7d5a107a5b9c0dabc [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.269-0400 m31200| 2015-07-09T13:56:23.268-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:23.268-0400-559eb5c7d5a107a5b9c0dabd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464583268), what: "moveChunk.start", ns: "db14.coll14", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.322-0400 m31200| 2015-07-09T13:56:23.321-0400 I SHARDING [conn63] remotely refreshing metadata for db14.coll14 based on current shard version 1|1||559eb5c7ca4787b9985d1c1b, current metadata version is 1|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.323-0400 m31200| 2015-07-09T13:56:23.323-0400 I SHARDING [conn63] metadata of collection db14.coll14 already up to date (shard version : 1|1||559eb5c7ca4787b9985d1c1b, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.323-0400 m31200| 2015-07-09T13:56:23.323-0400 I SHARDING [conn63] moveChunk request accepted at version 1|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.324-0400 m31200| 2015-07-09T13:56:23.323-0400 I SHARDING [conn63] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.324-0400 m31100| 2015-07-09T13:56:23.324-0400 I SHARDING [conn19] remotely refreshing metadata for db14.coll14, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.326-0400 m31100| 2015-07-09T13:56:23.325-0400 I SHARDING [conn19] collection db14.coll14 was previously unsharded, new metadata loaded with shard version 0|0||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.326-0400 m31100| 2015-07-09T13:56:23.325-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5c7ca4787b9985d1c1b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.326-0400 m31100| 2015-07-09T13:56:23.326-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for 
collection db14.coll14 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.328-0400 m31200| 2015-07-09T13:56:23.328-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.332-0400 m31200| 2015-07-09T13:56:23.331-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.337-0400 m31200| 2015-07-09T13:56:23.336-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.338-0400 m31100| 2015-07-09T13:56:23.337-0400 I INDEX [migrateThread] build index on: db14.coll14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.338-0400 m31100| 2015-07-09T13:56:23.338-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.346-0400 m31200| 2015-07-09T13:56:23.345-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.348-0400 m31100| 2015-07-09T13:56:23.347-0400 I INDEX [migrateThread] build index on: db14.coll14 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.348-0400 m31100| 2015-07-09T13:56:23.347-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.364-0400 m31200| 2015-07-09T13:56:23.363-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.366-0400 m31100| 2015-07-09T13:56:23.365-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.366-0400 m31100| 2015-07-09T13:56:23.366-0400 I SHARDING [migrateThread] Deleter starting delete for: db14.coll14 from { _id: MinKey } -> { _id: 0 }, with opId: 22974 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.367-0400 m31100| 2015-07-09T13:56:23.366-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db14.coll14 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.375-0400 m31102| 2015-07-09T13:56:23.374-0400 I INDEX [repl writer worker 5] build index on: db14.coll14 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.375-0400 m31102| 2015-07-09T13:56:23.375-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.375-0400 m31101| 2015-07-09T13:56:23.375-0400 I INDEX [repl writer worker 2] build index on: db14.coll14 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.375-0400 m31101| 2015-07-09T13:56:23.375-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.378-0400 m31102| 2015-07-09T13:56:23.378-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.381-0400 m31100| 2015-07-09T13:56:23.380-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.381-0400 m31100| 2015-07-09T13:56:23.380-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db14.coll14' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.383-0400 m31101| 2015-07-09T13:56:23.383-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.396-0400 m31200| 2015-07-09T13:56:23.396-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.397-0400 m31200| 2015-07-09T13:56:23.396-0400 I SHARDING [conn63] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.397-0400 m31200| 2015-07-09T13:56:23.397-0400 I SHARDING [conn63] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.397-0400 m31200| 2015-07-09T13:56:23.397-0400 I SHARDING [conn63] moveChunk setting version to: 2|0||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.404-0400 m31100| 2015-07-09T13:56:23.404-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db14.coll14' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.404-0400 m31100| 2015-07-09T13:56:23.404-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:23.404-0400-559eb5c7792e00bb67274924", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464583404), what: "moveChunk.to", ns: "db14.coll14", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 40, step 2 of 5: 12, step 3 of 5: 1, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.458-0400 m31200| 2015-07-09T13:56:23.458-0400 I SHARDING [conn63] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.459-0400 m31200| 2015-07-09T13:56:23.458-0400 I SHARDING [conn63] moveChunk updating self version to: 2|1||559eb5c7ca4787b9985d1c1b through { _id: 0 } -> { _id: MaxKey } for collection 'db14.coll14' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.459-0400 m31200| 2015-07-09T13:56:23.459-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:23.459-0400-559eb5c7d5a107a5b9c0dabe", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464583459), what: "moveChunk.commit", ns: "db14.coll14", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.512-0400 m31200| 2015-07-09T13:56:23.512-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.513-0400 m31200| 2015-07-09T13:56:23.512-0400 I SHARDING [conn63] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.513-0400 m31200| 2015-07-09T13:56:23.512-0400 I SHARDING [conn63] Deleter starting delete for: db14.coll14 from { _id: MinKey } -> { _id: 0 }, with opId: 22033 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:23.513-0400 m31200| 2015-07-09T13:56:23.512-0400 I SHARDING [conn63] rangeDeleter deleted 0 documents for db14.coll14 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.513-0400 m31200| 2015-07-09T13:56:23.512-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.514-0400 m31200| 2015-07-09T13:56:23.513-0400 I SHARDING [conn63] distributed lock 'db14.coll14/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.514-0400 m31200| 2015-07-09T13:56:23.514-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:23.513-0400-559eb5c7d5a107a5b9c0dabf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464583513), what: "moveChunk.from", ns: "db14.coll14", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 3, step 4 of 6: 70, step 5 of 6: 116, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.567-0400 m31200| 2015-07-09T13:56:23.567-0400 I COMMAND [conn63] command db14.coll14 command: moveChunk { moveChunk: "db14.coll14", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5c7ca4787b9985d1c1b') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.568-0400 m30999| 2015-07-09T13:56:23.568-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 70 version: 2|1||559eb5c7ca4787b9985d1c1b based on: 1|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.570-0400 m31100| 2015-07-09T13:56:23.569-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c7ca4787b9985d1c1b') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.573-0400 m31100| 2015-07-09T13:56:23.572-0400 I SHARDING [conn15] distributed lock 'db14.coll14/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5c7792e00bb67274925 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.573-0400 m31100| 2015-07-09T13:56:23.572-0400 I SHARDING [conn15] remotely refreshing metadata for db14.coll14 based on current shard version 0|0||559eb5c7ca4787b9985d1c1b, current metadata version is 1|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.574-0400 m31100| 2015-07-09T13:56:23.574-0400 I SHARDING [conn15] updating metadata for db14.coll14 from shard version 0|0||559eb5c7ca4787b9985d1c1b to shard version 2|0||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.574-0400 m31100| 2015-07-09T13:56:23.574-0400 I 
SHARDING [conn15] collection version was loaded at version 2|1||559eb5c7ca4787b9985d1c1b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.575-0400 m31100| 2015-07-09T13:56:23.574-0400 I SHARDING [conn15] splitChunk accepted at version 2|0||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.576-0400 m31100| 2015-07-09T13:56:23.576-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:23.576-0400-559eb5c7792e00bb67274926", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464583576), what: "split", ns: "db14.coll14", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5c7ca4787b9985d1c1b') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5c7ca4787b9985d1c1b') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.630-0400 m31100| 2015-07-09T13:56:23.629-0400 I SHARDING [conn15] distributed lock 'db14.coll14/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.632-0400 m30999| 2015-07-09T13:56:23.631-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 71 version: 2|3||559eb5c7ca4787b9985d1c1b based on: 2|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.632-0400 m31200| 2015-07-09T13:56:23.632-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db14.coll14", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5c7ca4787b9985d1c1b') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.636-0400 m31200| 2015-07-09T13:56:23.635-0400 I SHARDING [conn63] distributed lock 'db14.coll14/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5c7d5a107a5b9c0dac0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.636-0400 m31200| 2015-07-09T13:56:23.635-0400 I SHARDING [conn63] remotely refreshing metadata for db14.coll14 based on current shard version 2|0||559eb5c7ca4787b9985d1c1b, current metadata version is 2|0||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.637-0400 m31200| 2015-07-09T13:56:23.637-0400 I SHARDING [conn63] updating metadata for db14.coll14 from shard version 2|0||559eb5c7ca4787b9985d1c1b to shard version 2|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.637-0400 m31200| 2015-07-09T13:56:23.637-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eb5c7ca4787b9985d1c1b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.638-0400 m31200| 2015-07-09T13:56:23.637-0400 I SHARDING [conn63] splitChunk accepted at version 2|1||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.639-0400 m31200| 2015-07-09T13:56:23.638-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:23.638-0400-559eb5c7d5a107a5b9c0dac1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464583638), what: "split", ns: "db14.coll14", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb5c7ca4787b9985d1c1b') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5c7ca4787b9985d1c1b') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.693-0400 m31200| 2015-07-09T13:56:23.693-0400 I SHARDING [conn63] distributed lock 'db14.coll14/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.695-0400 m30999| 2015-07-09T13:56:23.695-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 72 version: 2|5||559eb5c7ca4787b9985d1c1b based on: 2|3||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.698-0400 m30999| 2015-07-09T13:56:23.697-0400 I SHARDING [conn1] sharded connection to test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.698-0400 m30999| 2015-07-09T13:56:23.697-0400 I SHARDING [conn1] retrying command: { listIndexes: "coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.698-0400 m31200| 2015-07-09T13:56:23.697-0400 I NETWORK [conn61] end connection 127.0.0.1:62850 (66 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.710-0400 m31200| 2015-07-09T13:56:23.708-0400 I INDEX [conn39] build index on: db14.coll14 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.710-0400 m31200| 2015-07-09T13:56:23.709-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.710-0400 m31100| 2015-07-09T13:56:23.710-0400 I INDEX [conn51] build index on: db14.coll14 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.711-0400 m31100| 2015-07-09T13:56:23.710-0400 I INDEX [conn51] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.715-0400 m31200| 2015-07-09T13:56:23.714-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.717-0400 m31100| 2015-07-09T13:56:23.717-0400 I INDEX [conn51] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.724-0400 m31201| 2015-07-09T13:56:23.724-0400 I INDEX [repl writer worker 1] build index on: db14.coll14 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.724-0400 m31201| 2015-07-09T13:56:23.724-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.725-0400 m31202| 2015-07-09T13:56:23.724-0400 I INDEX [repl writer worker 13] build index on: db14.coll14 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.725-0400 m31202| 2015-07-09T13:56:23.724-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.730-0400 m31200| 2015-07-09T13:56:23.730-0400 I INDEX [conn39] build index on: db14.coll14 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.730-0400 m31100| 2015-07-09T13:56:23.730-0400 I INDEX [conn51] build index on: db14.coll14 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.731-0400 m31100| 2015-07-09T13:56:23.730-0400 I INDEX [conn51] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.731-0400 m31200| 2015-07-09T13:56:23.730-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.734-0400 m31101| 2015-07-09T13:56:23.733-0400 I INDEX [repl writer worker 0] build index on: db14.coll14 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.734-0400 m31101| 2015-07-09T13:56:23.733-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.738-0400 m31102| 2015-07-09T13:56:23.736-0400 I INDEX [repl writer worker 7] build index on: db14.coll14 properties: { v: 1, key: { update_rename_y: 1.0 }, name: "update_rename_y_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.738-0400 m31102| 2015-07-09T13:56:23.736-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.741-0400 m31202| 2015-07-09T13:56:23.740-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.744-0400 m31201| 2015-07-09T13:56:23.742-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.746-0400 m31100| 2015-07-09T13:56:23.745-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.747-0400 m31200| 2015-07-09T13:56:23.746-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.748-0400 m31101| 2015-07-09T13:56:23.747-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.750-0400 m31102| 2015-07-09T13:56:23.749-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.755-0400 m31202| 2015-07-09T13:56:23.755-0400 I INDEX [repl writer worker 10] build index on: db14.coll14 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.756-0400 m31202| 2015-07-09T13:56:23.755-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.760-0400 m31102| 2015-07-09T13:56:23.760-0400 I INDEX [repl writer worker 3] build index on: db14.coll14 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.761-0400 m31102| 2015-07-09T13:56:23.760-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.762-0400 m31201| 2015-07-09T13:56:23.760-0400 I INDEX [repl writer worker 2] build index on: db14.coll14 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.762-0400 m31201| 2015-07-09T13:56:23.760-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.763-0400 m31202| 2015-07-09T13:56:23.763-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.764-0400 m31101| 2015-07-09T13:56:23.763-0400 I INDEX [repl writer worker 7] build index on: db14.coll14 properties: { v: 1, key: { update_rename_z: 1.0 }, name: "update_rename_z_1", ns: "db14.coll14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.764-0400 m31101| 2015-07-09T13:56:23.763-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.766-0400 m31102| 2015-07-09T13:56:23.766-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.768-0400 m31100| 2015-07-09T13:56:23.767-0400 I COMMAND [conn15] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.768-0400 m31200| 2015-07-09T13:56:23.767-0400 I COMMAND [conn63] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.768-0400 m31201| 2015-07-09T13:56:23.767-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.769-0400 m31101| 2015-07-09T13:56:23.769-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.771-0400 m31100| 2015-07-09T13:56:23.770-0400 I COMMAND [conn15] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.771-0400 m31200| 2015-07-09T13:56:23.771-0400 I COMMAND [conn63] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.771-0400 m31102| 2015-07-09T13:56:23.771-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.773-0400 m31201| 2015-07-09T13:56:23.773-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.774-0400 m31202| 2015-07-09T13:56:23.773-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.807-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.828-0400 m31101| 2015-07-09T13:56:23.808-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.828-0400 m31102| 2015-07-09T13:56:23.821-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.840-0400 m31202| 2015-07-09T13:56:23.839-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.855-0400 m31101| 2015-07-09T13:56:23.854-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.908-0400 m31201| 2015-07-09T13:56:23.902-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.946-0400 m30999| 2015-07-09T13:56:23.946-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62905 #90 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.957-0400 m30999| 2015-07-09T13:56:23.956-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62906 #91 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.957-0400 m30998| 2015-07-09T13:56:23.957-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62908 #89 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.966-0400 m30999| 2015-07-09T13:56:23.966-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62907 #92 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.967-0400 m30998| 2015-07-09T13:56:23.967-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62909 #90 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.977-0400 m30998| 2015-07-09T13:56:23.977-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62910 #91 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:23.990-0400 m30998| 2015-07-09T13:56:23.987-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62911 #92 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.000-0400 m30999| 2015-07-09T13:56:24.000-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62912 #93 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.004-0400 m30998| 2015-07-09T13:56:24.003-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62913 #93 (6 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:24.005-0400 m30998| 2015-07-09T13:56:24.005-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62914 #94 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.006-0400 m30999| 2015-07-09T13:56:24.005-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62915 #94 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.011-0400 m30999| 2015-07-09T13:56:24.011-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62916 #95 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.012-0400 m30999| 2015-07-09T13:56:24.012-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62917 #96 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.015-0400 m30998| 2015-07-09T13:56:24.015-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62918 #95 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.017-0400 m30998| 2015-07-09T13:56:24.017-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62919 #96 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.017-0400 m30998| 2015-07-09T13:56:24.017-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62920 #97 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.018-0400 m30998| 2015-07-09T13:56:24.018-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62923 #98 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.022-0400 m30999| 2015-07-09T13:56:24.022-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62921 #97 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.023-0400 m30999| 2015-07-09T13:56:24.022-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62922 #98 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.025-0400 m30999| 2015-07-09T13:56:24.025-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62924 #99 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.035-0400 setting random seed: 3600889160297 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.036-0400 setting random seed: 6464174594730 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.037-0400 setting random seed: 5223208433017 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.037-0400 setting random seed: 7673350633122 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.039-0400 setting random seed: 33533461391 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.042-0400 setting random seed: 9076153263449 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.042-0400 setting random seed: 4248964940197 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.042-0400 m30998| 2015-07-09T13:56:24.042-0400 I SHARDING [conn96] ChunkManager: time to load chunks for db14.coll14: 0ms sequenceNumber: 16 version: 2|5||559eb5c7ca4787b9985d1c1b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.043-0400 setting random seed: 2650178079493 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.043-0400 setting random seed: 3382464218884 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.045-0400 setting random seed: 3596885874867 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.068-0400 setting random seed: 6004150933586 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:24.068-0400 setting random seed: 2907969830557 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.070-0400 setting random seed: 2860927046276 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.076-0400 setting random seed: 1543002785183 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.078-0400 setting random seed: 1764787770807 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.083-0400 setting random seed: 144145307131 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.087-0400 setting random seed: 3247822653502 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.089-0400 setting random seed: 6155362827703 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.089-0400 setting random seed: 788077777251 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:24.090-0400 setting random seed: 2353783156722 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:26.874-0400 m31202| 2015-07-09T13:56:26.874-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62926 #10 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:26.877-0400 m31200| 2015-07-09T13:56:26.876-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62927 #73 (67 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:26.879-0400 m31201| 2015-07-09T13:56:26.879-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62928 #10 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.138-0400 m30998| 2015-07-09T13:56:30.136-0400 I NETWORK [conn96] end connection 127.0.0.1:62919 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.138-0400 m30999| 2015-07-09T13:56:30.136-0400 I NETWORK [conn99] end connection 127.0.0.1:62924 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.143-0400 m30998| 2015-07-09T13:56:30.137-0400 I NETWORK [conn92] end connection 127.0.0.1:62911 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.143-0400 m30999| 2015-07-09T13:56:30.138-0400 I NETWORK [conn93] end connection 127.0.0.1:62912 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.144-0400 m30999| 2015-07-09T13:56:30.144-0400 I NETWORK [conn94] end connection 127.0.0.1:62915 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.176-0400 m30999| 2015-07-09T13:56:30.161-0400 I NETWORK [conn96] end connection 127.0.0.1:62917 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.181-0400 m30998| 2015-07-09T13:56:30.181-0400 I NETWORK [conn91] end connection 127.0.0.1:62910 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.182-0400 m30998| 2015-07-09T13:56:30.181-0400 I NETWORK [conn95] end connection 127.0.0.1:62918 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.182-0400 m30998| 2015-07-09T13:56:30.181-0400 I NETWORK [conn93] end connection 127.0.0.1:62913 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.192-0400 m29000| 2015-07-09T13:56:30.191-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62929 #43 (43 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.196-0400 m30998| 2015-07-09T13:56:30.195-0400 I NETWORK [conn89] end connection 127.0.0.1:62908 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.213-0400 m30999| 
2015-07-09T13:56:30.194-0400 I NETWORK [conn92] end connection 127.0.0.1:62907 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.216-0400 m30998| 2015-07-09T13:56:30.208-0400 I NETWORK [conn98] end connection 127.0.0.1:62923 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.216-0400 m30998| 2015-07-09T13:56:30.208-0400 I NETWORK [conn97] end connection 127.0.0.1:62920 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.217-0400 m30998| 2015-07-09T13:56:30.213-0400 I NETWORK [conn90] end connection 127.0.0.1:62909 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.229-0400 m30999| 2015-07-09T13:56:30.214-0400 I NETWORK [conn90] end connection 127.0.0.1:62905 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.245-0400 m30998| 2015-07-09T13:56:30.235-0400 I NETWORK [conn94] end connection 127.0.0.1:62914 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.246-0400 m30999| 2015-07-09T13:56:30.235-0400 I NETWORK [conn95] end connection 127.0.0.1:62916 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.247-0400 m30999| 2015-07-09T13:56:30.246-0400 I NETWORK [conn97] end connection 127.0.0.1:62921 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.252-0400 m30999| 2015-07-09T13:56:30.251-0400 I NETWORK [conn91] end connection 127.0.0.1:62906 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.252-0400 m30999| 2015-07-09T13:56:30.251-0400 I NETWORK [conn98] end connection 127.0.0.1:62922 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.273-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.273-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.273-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.274-0400 jstests/concurrency/fsm_workloads/update_rename_noindex.js: Workload completed in 6499 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.274-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.274-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.274-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.274-0400 m30999| 2015-07-09T13:56:30.274-0400 I COMMAND [conn1] DROP: db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.275-0400 m30999| 2015-07-09T13:56:30.274-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:30.274-0400-559eb5ceca4787b9985d1c1d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464590274), what: "dropCollection.start", ns: "db14.coll14", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.330-0400 m30999| 2015-07-09T13:56:30.329-0400 I SHARDING [conn1] distributed lock 'db14.coll14/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5ceca4787b9985d1c1e [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.331-0400 m31100| 2015-07-09T13:56:30.331-0400 I COMMAND [conn15] CMD: drop db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.334-0400 m31200| 2015-07-09T13:56:30.334-0400 I COMMAND [conn63] CMD: drop db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.334-0400 m31102| 2015-07-09T13:56:30.334-0400 I COMMAND [repl writer worker 4] CMD: drop db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.335-0400 m31101| 
2015-07-09T13:56:30.335-0400 I COMMAND [repl writer worker 13] CMD: drop db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.337-0400 m31202| 2015-07-09T13:56:30.337-0400 I COMMAND [repl writer worker 7] CMD: drop db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.337-0400 m31201| 2015-07-09T13:56:30.337-0400 I COMMAND [repl writer worker 9] CMD: drop db14.coll14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.391-0400 m31100| 2015-07-09T13:56:30.390-0400 I SHARDING [conn15] remotely refreshing metadata for db14.coll14 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5c7ca4787b9985d1c1b, current metadata version is 2|3||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.392-0400 m31100| 2015-07-09T13:56:30.392-0400 W SHARDING [conn15] no chunks found when reloading db14.coll14, previous version was 0|0||559eb5c7ca4787b9985d1c1b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.392-0400 m31100| 2015-07-09T13:56:30.392-0400 I SHARDING [conn15] dropping metadata for db14.coll14 at shard version 2|3||559eb5c7ca4787b9985d1c1b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.394-0400 m31200| 2015-07-09T13:56:30.393-0400 I SHARDING [conn63] remotely refreshing metadata for db14.coll14 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5c7ca4787b9985d1c1b, current metadata version is 2|5||559eb5c7ca4787b9985d1c1b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.395-0400 m31200| 2015-07-09T13:56:30.395-0400 W SHARDING [conn63] no chunks found when reloading db14.coll14, previous version was 0|0||559eb5c7ca4787b9985d1c1b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.395-0400 m31200| 2015-07-09T13:56:30.395-0400 I SHARDING [conn63] dropping metadata for db14.coll14 at shard version 2|5||559eb5c7ca4787b9985d1c1b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.396-0400 m30999| 2015-07-09T13:56:30.396-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:30.396-0400-559eb5ceca4787b9985d1c1f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464590396), what: "dropCollection", ns: "db14.coll14", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.451-0400 m30999| 2015-07-09T13:56:30.450-0400 I SHARDING [conn1] distributed lock 'db14.coll14/bs-osx108-8:30999:1436464534:16807' unlocked. 
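
The teardown above is a sharded-collection drop driven through mongos: a dropCollection.start metadata event, CMD: drop on each shard primary (replayed by the repl writer workers on the secondaries), then each shard refreshing metadata, finding no chunks ("this is a drop"), and discarding its shard version. A sketch of the shell calls behind it, assuming the same mongos connection (the dropDatabase entries just below come from the second call):

    var db14 = new Mongo("localhost:30999").getDB("db14");
    // Logs "DROP: db14.coll14", takes the db14.coll14 distributed lock, and
    // drives the per-shard drops and metadata cleanup shown above.
    db14.coll14.drop();
    // Logs "DROP DATABASE: db14" / "dropDatabase db14 starting|finished".
    db14.dropDatabase();
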
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.506-0400 m30999| 2015-07-09T13:56:30.506-0400 I COMMAND [conn1] DROP DATABASE: db14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.506-0400 m30999| 2015-07-09T13:56:30.506-0400 I SHARDING [conn1] DBConfig::dropDatabase: db14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.506-0400 m30999| 2015-07-09T13:56:30.506-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:30.506-0400-559eb5ceca4787b9985d1c20", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464590506), what: "dropDatabase.start", ns: "db14", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.612-0400 m30999| 2015-07-09T13:56:30.611-0400 I SHARDING [conn1] DBConfig::dropDatabase: db14 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.612-0400 m31200| 2015-07-09T13:56:30.612-0400 I COMMAND [conn66] dropDatabase db14 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.612-0400 m31200| 2015-07-09T13:56:30.612-0400 I COMMAND [conn66] dropDatabase db14 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.613-0400 m30999| 2015-07-09T13:56:30.612-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:30.612-0400-559eb5ceca4787b9985d1c21", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464590612), what: "dropDatabase", ns: "db14", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.613-0400 m31201| 2015-07-09T13:56:30.612-0400 I COMMAND [repl writer worker 8] dropDatabase db14 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.613-0400 m31201| 2015-07-09T13:56:30.613-0400 I COMMAND [repl writer worker 8] dropDatabase db14 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.613-0400 m31202| 2015-07-09T13:56:30.613-0400 I COMMAND [repl writer worker 6] dropDatabase db14 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.613-0400 m31202| 2015-07-09T13:56:30.613-0400 I COMMAND [repl writer worker 6] dropDatabase db14 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.695-0400 m31100| 2015-07-09T13:56:30.695-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.699-0400 m31102| 2015-07-09T13:56:30.699-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.699-0400 m31101| 2015-07-09T13:56:30.699-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.728-0400 m31200| 2015-07-09T13:56:30.727-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.730-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.731-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.731-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.731-0400 jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.731-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.731-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.731-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.731-0400 m31202| 2015-07-09T13:56:30.731-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:30.731-0400 m31201| 2015-07-09T13:56:30.731-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.738-0400 m30999| 2015-07-09T13:56:30.738-0400 I SHARDING [conn1] distributed lock 'db15/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5ceca4787b9985d1c22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.742-0400 m30999| 2015-07-09T13:56:30.741-0400 I SHARDING [conn1] Placing [db15] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.742-0400 m30999| 2015-07-09T13:56:30.741-0400 I SHARDING [conn1] Enabling sharding for database [db15] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.796-0400 m30999| 2015-07-09T13:56:30.795-0400 I SHARDING [conn1] distributed lock 'db15/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.819-0400 m31200| 2015-07-09T13:56:30.819-0400 I INDEX [conn59] build index on: db15.coll15 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db15.coll15" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.820-0400 m31200| 2015-07-09T13:56:30.819-0400 I INDEX [conn59] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.828-0400 m31200| 2015-07-09T13:56:30.827-0400 I INDEX [conn59] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.829-0400 m30999| 2015-07-09T13:56:30.829-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db15.coll15", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.833-0400 m30999| 2015-07-09T13:56:30.833-0400 I SHARDING [conn1] distributed lock 'db15.coll15/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5ceca4787b9985d1c23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.834-0400 m30999| 2015-07-09T13:56:30.834-0400 I SHARDING [conn1] enable sharding on: db15.coll15 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.834-0400 m30999| 2015-07-09T13:56:30.834-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:30.834-0400-559eb5ceca4787b9985d1c24", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464590834), what: "shardCollection.start", ns: "db15.coll15", details: { shardKey: { _id: "hashed" }, collection: "db15.coll15", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.838-0400 m31201| 2015-07-09T13:56:30.837-0400 I INDEX [repl writer worker 15] build index on: db15.coll15 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db15.coll15" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.838-0400 m31201| 2015-07-09T13:56:30.837-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.839-0400 m31202| 2015-07-09T13:56:30.838-0400 I INDEX [repl writer worker 5] build index on: db15.coll15 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db15.coll15" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.839-0400 m31202| 2015-07-09T13:56:30.838-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.844-0400 m31202| 2015-07-09T13:56:30.844-0400 I INDEX [repl writer 
worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.847-0400 m31201| 2015-07-09T13:56:30.846-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.886-0400 m30999| 2015-07-09T13:56:30.886-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db15.coll15 using new epoch 559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:30.992-0400 m30999| 2015-07-09T13:56:30.992-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 73 version: 1|1||559eb5ceca4787b9985d1c25 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.047-0400 m30999| 2015-07-09T13:56:31.046-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 74 version: 1|1||559eb5ceca4787b9985d1c25 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.049-0400 m31200| 2015-07-09T13:56:31.048-0400 I SHARDING [conn39] remotely refreshing metadata for db15.coll15 with requested shard version 1|1||559eb5ceca4787b9985d1c25, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.050-0400 m31200| 2015-07-09T13:56:31.049-0400 I SHARDING [conn39] collection db15.coll15 was previously unsharded, new metadata loaded with shard version 1|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.050-0400 m31200| 2015-07-09T13:56:31.049-0400 I SHARDING [conn39] collection version was loaded at version 1|1||559eb5ceca4787b9985d1c25, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.050-0400 m30999| 2015-07-09T13:56:31.050-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:31.050-0400-559eb5cfca4787b9985d1c26", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464591050), what: "shardCollection", ns: "db15.coll15", details: { version: "1|1||559eb5ceca4787b9985d1c25" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.104-0400 m30999| 2015-07-09T13:56:31.104-0400 I SHARDING [conn1] distributed lock 'db15.coll15/bs-osx108-8:30999:1436464534:16807' unlocked. 
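
At this point db15.coll15 has the same hashed-_id layout db14.coll14 had, and the entries that follow repeat the balancing step: the { _id: MinKey } -> { _id: 0 } chunk moves from test-rs1 to test-rs0 with waitForDelete, so the donor deletes the migrated range inline before returning. A sketch of an equivalent explicit request (assumption: the mongos-side option is spelled _waitForDelete; the donor-shard request logged below carries it as waitForDelete):

    var adminDB = new Mongo("localhost:30999").getDB("admin");
    assert.commandWorked(adminDB.runCommand({
        moveChunk: "db15.coll15",
        find: { _id: MinKey },  // any point inside the MinKey -> 0 chunk
        to: "test-rs0",
        _waitForDelete: true    // matches "moveChunk waiting for full cleanup after move"
    }));
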
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.105-0400 m30999| 2015-07-09T13:56:31.105-0400 I SHARDING [conn1] moving chunk ns: db15.coll15 moving ( ns: db15.coll15, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.106-0400 m31200| 2015-07-09T13:56:31.105-0400 I SHARDING [conn63] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.107-0400 m31200| 2015-07-09T13:56:31.106-0400 I SHARDING [conn63] received moveChunk request: { moveChunk: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5ceca4787b9985d1c25') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.109-0400 m31200| 2015-07-09T13:56:31.109-0400 I SHARDING [conn63] distributed lock 'db15.coll15/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5cfd5a107a5b9c0dac3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.110-0400 m31200| 2015-07-09T13:56:31.109-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:31.109-0400-559eb5cfd5a107a5b9c0dac4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464591109), what: "moveChunk.start", ns: "db15.coll15", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.163-0400 m31200| 2015-07-09T13:56:31.162-0400 I SHARDING [conn63] remotely refreshing metadata for db15.coll15 based on current shard version 1|1||559eb5ceca4787b9985d1c25, current metadata version is 1|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.165-0400 m31200| 2015-07-09T13:56:31.164-0400 I SHARDING [conn63] metadata of collection db15.coll15 already up to date (shard version : 1|1||559eb5ceca4787b9985d1c25, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.165-0400 m31200| 2015-07-09T13:56:31.164-0400 I SHARDING [conn63] moveChunk request accepted at version 1|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.165-0400 m31200| 2015-07-09T13:56:31.165-0400 I SHARDING [conn63] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.166-0400 m31100| 2015-07-09T13:56:31.165-0400 I SHARDING [conn19] remotely refreshing metadata for db15.coll15, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.167-0400 m31100| 2015-07-09T13:56:31.167-0400 I SHARDING [conn19] collection db15.coll15 was previously unsharded, new metadata loaded with shard version 0|0||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.167-0400 m31100| 2015-07-09T13:56:31.167-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5ceca4787b9985d1c25, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.168-0400 m31100| 2015-07-09T13:56:31.167-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for 
collection db15.coll15 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.170-0400 m31200| 2015-07-09T13:56:31.169-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.174-0400 m31200| 2015-07-09T13:56:31.172-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.178-0400 m31200| 2015-07-09T13:56:31.177-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.181-0400 m31100| 2015-07-09T13:56:31.181-0400 I INDEX [migrateThread] build index on: db15.coll15 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db15.coll15" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.182-0400 m31100| 2015-07-09T13:56:31.181-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.188-0400 m31200| 2015-07-09T13:56:31.187-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.189-0400 m31100| 2015-07-09T13:56:31.189-0400 I INDEX [migrateThread] build index on: db15.coll15 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db15.coll15" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.189-0400 m31100| 2015-07-09T13:56:31.189-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.197-0400 m31100| 2015-07-09T13:56:31.197-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.198-0400 m31100| 2015-07-09T13:56:31.197-0400 I SHARDING [migrateThread] Deleter starting delete for: db15.coll15 from { _id: MinKey } -> { _id: 0 }, with opId: 23066 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.198-0400 m31100| 2015-07-09T13:56:31.198-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db15.coll15 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.204-0400 m31101| 2015-07-09T13:56:31.204-0400 I INDEX [repl writer worker 12] build index on: db15.coll15 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db15.coll15" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.205-0400 m31101| 2015-07-09T13:56:31.204-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.205-0400 m31200| 2015-07-09T13:56:31.204-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.206-0400 m31102| 2015-07-09T13:56:31.204-0400 I INDEX [repl writer worker 1] build index on: db15.coll15 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db15.coll15" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.206-0400 m31102| 2015-07-09T13:56:31.204-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.211-0400 m31102| 2015-07-09T13:56:31.210-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.213-0400 m31100| 2015-07-09T13:56:31.212-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.214-0400 m31100| 2015-07-09T13:56:31.213-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db15.coll15' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.214-0400 m31101| 2015-07-09T13:56:31.214-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.238-0400 m31200| 2015-07-09T13:56:31.237-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.238-0400 m31200| 2015-07-09T13:56:31.237-0400 I SHARDING [conn63] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.238-0400 m31200| 2015-07-09T13:56:31.238-0400 I SHARDING [conn63] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.238-0400 m31200| 2015-07-09T13:56:31.238-0400 I SHARDING [conn63] moveChunk setting version to: 2|0||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.248-0400 m31100| 2015-07-09T13:56:31.247-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db15.coll15' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.248-0400 m31100| 2015-07-09T13:56:31.247-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:31.247-0400-559eb5cf792e00bb67274927", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464591247), what: "moveChunk.to", ns: "db15.coll15", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 30, step 2 of 5: 14, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 34, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.301-0400 m31200| 2015-07-09T13:56:31.301-0400 I SHARDING [conn63] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.302-0400 m31200| 2015-07-09T13:56:31.301-0400 I SHARDING [conn63] moveChunk updating self version to: 2|1||559eb5ceca4787b9985d1c25 through { _id: 0 } -> { _id: MaxKey } for collection 'db15.coll15' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.303-0400 m31200| 2015-07-09T13:56:31.302-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:31.302-0400-559eb5cfd5a107a5b9c0dac5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464591302), what: "moveChunk.commit", ns: "db15.coll15", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.356-0400 m31200| 2015-07-09T13:56:31.355-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.356-0400 m31200| 2015-07-09T13:56:31.355-0400 I SHARDING [conn63] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.356-0400 m31200| 2015-07-09T13:56:31.356-0400 I SHARDING [conn63] Deleter starting delete for: db15.coll15 from { _id: MinKey } -> { _id: 0 }, with opId: 22143 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:31.356-0400 m31200| 2015-07-09T13:56:31.356-0400 I SHARDING [conn63] rangeDeleter deleted 0 documents for db15.coll15 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.356-0400 m31200| 2015-07-09T13:56:31.356-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.357-0400 m31200| 2015-07-09T13:56:31.356-0400 I SHARDING [conn63] distributed lock 'db15.coll15/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.357-0400 m31200| 2015-07-09T13:56:31.357-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:31.357-0400-559eb5cfd5a107a5b9c0dac6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464591357), what: "moveChunk.from", ns: "db15.coll15", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 69, step 5 of 6: 118, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.411-0400 m31200| 2015-07-09T13:56:31.410-0400 I COMMAND [conn63] command db15.coll15 command: moveChunk { moveChunk: "db15.coll15", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5ceca4787b9985d1c25') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 304ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.418-0400 m30999| 2015-07-09T13:56:31.417-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 75 version: 2|1||559eb5ceca4787b9985d1c25 based on: 1|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.419-0400 m31100| 2015-07-09T13:56:31.418-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5ceca4787b9985d1c25') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.422-0400 m31100| 2015-07-09T13:56:31.421-0400 I SHARDING [conn15] distributed lock 'db15.coll15/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5cf792e00bb67274928 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.422-0400 m31100| 2015-07-09T13:56:31.421-0400 I SHARDING [conn15] remotely refreshing metadata for db15.coll15 based on current shard version 0|0||559eb5ceca4787b9985d1c25, current metadata version is 1|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.423-0400 m31100| 2015-07-09T13:56:31.423-0400 I SHARDING [conn15] updating metadata for db15.coll15 from shard version 0|0||559eb5ceca4787b9985d1c25 to shard version 2|0||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.424-0400 m31100| 2015-07-09T13:56:31.423-0400 I 
SHARDING [conn15] collection version was loaded at version 2|1||559eb5ceca4787b9985d1c25, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.424-0400 m31100| 2015-07-09T13:56:31.423-0400 I SHARDING [conn15] splitChunk accepted at version 2|0||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.425-0400 m31100| 2015-07-09T13:56:31.425-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:31.425-0400-559eb5cf792e00bb67274929", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464591425), what: "split", ns: "db15.coll15", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5ceca4787b9985d1c25') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5ceca4787b9985d1c25') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.478-0400 m31100| 2015-07-09T13:56:31.478-0400 I SHARDING [conn15] distributed lock 'db15.coll15/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.480-0400 m30999| 2015-07-09T13:56:31.480-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 76 version: 2|3||559eb5ceca4787b9985d1c25 based on: 2|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.481-0400 m31200| 2015-07-09T13:56:31.480-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db15.coll15", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5ceca4787b9985d1c25') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.484-0400 m31200| 2015-07-09T13:56:31.484-0400 I SHARDING [conn63] distributed lock 'db15.coll15/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5cfd5a107a5b9c0dac7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.484-0400 m31200| 2015-07-09T13:56:31.484-0400 I SHARDING [conn63] remotely refreshing metadata for db15.coll15 based on current shard version 2|0||559eb5ceca4787b9985d1c25, current metadata version is 2|0||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.486-0400 m31200| 2015-07-09T13:56:31.485-0400 I SHARDING [conn63] updating metadata for db15.coll15 from shard version 2|0||559eb5ceca4787b9985d1c25 to shard version 2|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.486-0400 m31200| 2015-07-09T13:56:31.485-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eb5ceca4787b9985d1c25, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.486-0400 m31200| 2015-07-09T13:56:31.485-0400 I SHARDING [conn63] splitChunk accepted at version 2|1||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.487-0400 m31200| 2015-07-09T13:56:31.487-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:31.487-0400-559eb5cfd5a107a5b9c0dac8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464591487), what: "split", ns: "db15.coll15", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb5ceca4787b9985d1c25') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5ceca4787b9985d1c25') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.542-0400 m31200| 2015-07-09T13:56:31.541-0400 I SHARDING [conn63] distributed lock 'db15.coll15/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.543-0400 m30999| 2015-07-09T13:56:31.543-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 77 version: 2|5||559eb5ceca4787b9985d1c25 based on: 2|3||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.570-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.658-0400 m30998| 2015-07-09T13:56:31.658-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62930 #99 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.668-0400 m30998| 2015-07-09T13:56:31.668-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62931 #100 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.679-0400 m30999| 2015-07-09T13:56:31.679-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62932 #100 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.679-0400 m30999| 2015-07-09T13:56:31.679-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62933 #101 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.681-0400 m30998| 2015-07-09T13:56:31.680-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62934 #101 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.681-0400 m30998| 2015-07-09T13:56:31.681-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62935 #102 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.682-0400 m30999| 2015-07-09T13:56:31.682-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62936 #102 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.684-0400 m30999| 2015-07-09T13:56:31.684-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62937 #103 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.686-0400 m30999| 2015-07-09T13:56:31.686-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62938 #104 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.686-0400 m30998| 2015-07-09T13:56:31.686-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62939 #103 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.691-0400 setting random seed: 3183093382976 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.692-0400 setting random seed: 5289825452491 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.692-0400 setting random seed: 6954559991136 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.692-0400 setting random seed: 1844837488606 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.696-0400 setting random seed: 6183685446158 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.697-0400 setting random seed: 8250512601807 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.697-0400 setting random seed: 6096421424299 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.697-0400 setting random seed: 
9201341969892 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.698-0400 m30998| 2015-07-09T13:56:31.698-0400 I SHARDING [conn100] ChunkManager: time to load chunks for db15.coll15: 0ms sequenceNumber: 17 version: 2|5||559eb5ceca4787b9985d1c25 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.702-0400 setting random seed: 2658525900915 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:31.706-0400 setting random seed: 6365363150835 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.064-0400 m30998| 2015-07-09T13:56:32.064-0400 I NETWORK [conn103] end connection 127.0.0.1:62939 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.072-0400 m30998| 2015-07-09T13:56:32.072-0400 I NETWORK [conn100] end connection 127.0.0.1:62931 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.083-0400 m30999| 2015-07-09T13:56:32.083-0400 I NETWORK [conn100] end connection 127.0.0.1:62932 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.092-0400 m30998| 2015-07-09T13:56:32.091-0400 I NETWORK [conn102] end connection 127.0.0.1:62935 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.118-0400 m30999| 2015-07-09T13:56:32.118-0400 I NETWORK [conn104] end connection 127.0.0.1:62938 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.120-0400 m30999| 2015-07-09T13:56:32.120-0400 I NETWORK [conn101] end connection 127.0.0.1:62933 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.133-0400 m30999| 2015-07-09T13:56:32.133-0400 I NETWORK [conn102] end connection 127.0.0.1:62936 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.136-0400 m30998| 2015-07-09T13:56:32.136-0400 I NETWORK [conn99] end connection 127.0.0.1:62930 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.138-0400 m30998| 2015-07-09T13:56:32.138-0400 I NETWORK [conn101] end connection 127.0.0.1:62934 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.170-0400 m30999| 2015-07-09T13:56:32.170-0400 I NETWORK [conn103] end connection 127.0.0.1:62937 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.197-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.197-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.197-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.198-0400 jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js: Workload completed in 627 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.198-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.198-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.198-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.198-0400 m30999| 2015-07-09T13:56:32.198-0400 I COMMAND [conn1] DROP: db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.198-0400 m30999| 2015-07-09T13:56:32.198-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:32.198-0400-559eb5d0ca4787b9985d1c27", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464592198), what: "dropCollection.start", ns: "db15.coll15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.255-0400 m30999| 2015-07-09T13:56:32.254-0400 I SHARDING [conn1] distributed lock 'db15.coll15/bs-osx108-8:30999:1436464534:16807' 
acquired, ts : 559eb5d0ca4787b9985d1c28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.255-0400 m31100| 2015-07-09T13:56:32.255-0400 I COMMAND [conn15] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.258-0400 m31200| 2015-07-09T13:56:32.257-0400 I COMMAND [conn63] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.259-0400 m31102| 2015-07-09T13:56:32.259-0400 I COMMAND [repl writer worker 3] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.259-0400 m31101| 2015-07-09T13:56:32.259-0400 I COMMAND [repl writer worker 3] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.261-0400 m31201| 2015-07-09T13:56:32.261-0400 I COMMAND [repl writer worker 10] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.262-0400 m31202| 2015-07-09T13:56:32.261-0400 I COMMAND [repl writer worker 6] CMD: drop db15.coll15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.313-0400 m31100| 2015-07-09T13:56:32.313-0400 I SHARDING [conn15] remotely refreshing metadata for db15.coll15 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5ceca4787b9985d1c25, current metadata version is 2|3||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.315-0400 m31100| 2015-07-09T13:56:32.314-0400 W SHARDING [conn15] no chunks found when reloading db15.coll15, previous version was 0|0||559eb5ceca4787b9985d1c25, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.315-0400 m31100| 2015-07-09T13:56:32.314-0400 I SHARDING [conn15] dropping metadata for db15.coll15 at shard version 2|3||559eb5ceca4787b9985d1c25, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.316-0400 m31200| 2015-07-09T13:56:32.315-0400 I SHARDING [conn63] remotely refreshing metadata for db15.coll15 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5ceca4787b9985d1c25, current metadata version is 2|5||559eb5ceca4787b9985d1c25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.317-0400 m31200| 2015-07-09T13:56:32.317-0400 W SHARDING [conn63] no chunks found when reloading db15.coll15, previous version was 0|0||559eb5ceca4787b9985d1c25, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.318-0400 m31200| 2015-07-09T13:56:32.317-0400 I SHARDING [conn63] dropping metadata for db15.coll15 at shard version 2|5||559eb5ceca4787b9985d1c25, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.319-0400 m30999| 2015-07-09T13:56:32.318-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:32.318-0400-559eb5d0ca4787b9985d1c29", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464592318), what: "dropCollection", ns: "db15.coll15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.373-0400 m30999| 2015-07-09T13:56:32.372-0400 I SHARDING [conn1] distributed lock 'db15.coll15/bs-osx108-8:30999:1436464534:16807' unlocked. 
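The entries above record mongos presplitting the hashed collection db15.coll15: one of the two initial chunks is migrated from test-rs1 to test-rs0 with waitForDelete, then each shard splits its remaining chunk at the quarter points of the signed 64-bit hashed key space (+/-4611686018427387902), before the workload runs and the collection is dropped. A minimal mongo-shell sketch of the same sequence follows; the mongos address and the _waitForDelete option name are assumptions for illustration, while the namespaces, shard names, and split keys come from the logged requests.

    // Sketch only: reproduce the logged chunk layout by hand.
    // "localhost:30999" is an assumed address for the mongos shown as m30999.
    var mongos = new Mongo("localhost:30999");
    var admin = mongos.getDB("admin");

    // Migrate the chunk covering { _id: MinKey } -> { _id: 0 } to test-rs0,
    // waiting for the range deleter, as in the moveChunk request above.
    admin.runCommand({
        moveChunk: "db15.coll15",
        find: { _id: MinKey },
        to: "test-rs0",
        _waitForDelete: true   // option name assumed for this shell/server version
    });

    // Split each half at the quarter points of the hashed key space,
    // matching the logged splitKeys.
    admin.runCommand({ split: "db15.coll15", middle: { _id: NumberLong("-4611686018427387902") } });
    admin.runCommand({ split: "db15.coll15", middle: { _id: NumberLong("4611686018427387902") } });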
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.428-0400 m30999| 2015-07-09T13:56:32.428-0400 I COMMAND [conn1] DROP DATABASE: db15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.428-0400 m30999| 2015-07-09T13:56:32.428-0400 I SHARDING [conn1] DBConfig::dropDatabase: db15 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.428-0400 m30999| 2015-07-09T13:56:32.428-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:32.428-0400-559eb5d0ca4787b9985d1c2a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464592428), what: "dropDatabase.start", ns: "db15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.535-0400 m30999| 2015-07-09T13:56:32.534-0400 I SHARDING [conn1] DBConfig::dropDatabase: db15 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.535-0400 m31200| 2015-07-09T13:56:32.535-0400 I COMMAND [conn66] dropDatabase db15 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.535-0400 m31200| 2015-07-09T13:56:32.535-0400 I COMMAND [conn66] dropDatabase db15 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.536-0400 m30999| 2015-07-09T13:56:32.536-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:32.536-0400-559eb5d0ca4787b9985d1c2b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464592536), what: "dropDatabase", ns: "db15", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.536-0400 m31201| 2015-07-09T13:56:32.536-0400 I COMMAND [repl writer worker 12] dropDatabase db15 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.536-0400 m31201| 2015-07-09T13:56:32.536-0400 I COMMAND [repl writer worker 12] dropDatabase db15 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.536-0400 m31202| 2015-07-09T13:56:32.536-0400 I COMMAND [repl writer worker 13] dropDatabase db15 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.537-0400 m31202| 2015-07-09T13:56:32.536-0400 I COMMAND [repl writer worker 13] dropDatabase db15 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.627-0400 m31100| 2015-07-09T13:56:32.627-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.631-0400 m31101| 2015-07-09T13:56:32.630-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.631-0400 m31102| 2015-07-09T13:56:32.631-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.659-0400 m31200| 2015-07-09T13:56:32.659-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.660-0400 m31202| 2015-07-09T13:56:32.660-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.660-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.660-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.660-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.661-0400 jstests/concurrency/fsm_workloads/update_replace.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.661-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.661-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.661-0400 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:32.662-0400 m31201| 2015-07-09T13:56:32.662-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.667-0400 m30999| 2015-07-09T13:56:32.666-0400 I SHARDING [conn1] distributed lock 'db16/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5d0ca4787b9985d1c2c [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.670-0400 m30999| 2015-07-09T13:56:32.670-0400 I SHARDING [conn1] Placing [db16] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.670-0400 m30999| 2015-07-09T13:56:32.670-0400 I SHARDING [conn1] Enabling sharding for database [db16] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.724-0400 m30999| 2015-07-09T13:56:32.724-0400 I SHARDING [conn1] distributed lock 'db16/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.745-0400 m31200| 2015-07-09T13:56:32.744-0400 I INDEX [conn53] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.746-0400 m31200| 2015-07-09T13:56:32.744-0400 I INDEX [conn53] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.754-0400 m31200| 2015-07-09T13:56:32.754-0400 I INDEX [conn53] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.755-0400 m30999| 2015-07-09T13:56:32.755-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db16.coll16", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.759-0400 m30999| 2015-07-09T13:56:32.759-0400 I SHARDING [conn1] distributed lock 'db16.coll16/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5d0ca4787b9985d1c2d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.760-0400 m30999| 2015-07-09T13:56:32.759-0400 I SHARDING [conn1] enable sharding on: db16.coll16 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.760-0400 m30999| 2015-07-09T13:56:32.759-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:32.759-0400-559eb5d0ca4787b9985d1c2e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464592759), what: "shardCollection.start", ns: "db16.coll16", details: { shardKey: { _id: "hashed" }, collection: "db16.coll16", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.766-0400 m31201| 2015-07-09T13:56:32.764-0400 I INDEX [repl writer worker 5] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.766-0400 m31201| 2015-07-09T13:56:32.764-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.768-0400 m31201| 2015-07-09T13:56:32.768-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.771-0400 m31202| 2015-07-09T13:56:32.770-0400 I INDEX [repl writer worker 2] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.771-0400 m31202| 2015-07-09T13:56:32.770-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.774-0400 m31202| 2015-07-09T13:56:32.774-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.813-0400 m30999| 2015-07-09T13:56:32.812-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db16.coll16 using new epoch 559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.922-0400 m30999| 2015-07-09T13:56:32.922-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 1ms sequenceNumber: 78 version: 1|1||559eb5d0ca4787b9985d1c2f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.978-0400 m30999| 2015-07-09T13:56:32.978-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 79 version: 1|1||559eb5d0ca4787b9985d1c2f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.980-0400 m31200| 2015-07-09T13:56:32.979-0400 I SHARDING [conn39] remotely refreshing metadata for db16.coll16 with requested shard version 1|1||559eb5d0ca4787b9985d1c2f, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.981-0400 m31200| 2015-07-09T13:56:32.981-0400 I SHARDING [conn39] collection db16.coll16 was previously unsharded, new metadata loaded with shard version 1|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.982-0400 m31200| 2015-07-09T13:56:32.981-0400 I SHARDING [conn39] collection version was loaded at version 1|1||559eb5d0ca4787b9985d1c2f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:32.982-0400 m30999| 2015-07-09T13:56:32.981-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:32.981-0400-559eb5d0ca4787b9985d1c30", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464592981), what: "shardCollection", ns: "db16.coll16", details: { version: "1|1||559eb5d0ca4787b9985d1c2f" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.037-0400 m30999| 2015-07-09T13:56:33.036-0400 I SHARDING [conn1] distributed lock 'db16.coll16/bs-osx108-8:30999:1436464534:16807' unlocked. 
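The shardCollection sequence just logged (placing db16 on test-rs1, enabling sharding for the database, building the { _id: "hashed" } index on the primary shard, and registering two initial chunks under epoch 559eb5d0ca4787b9985d1c2f) corresponds to the following client-side commands. This is a sketch: the names are taken from the log, and the shell connection is assumed to point at one of the mongos routers.

    // Assumes "db" is a shell connection to a mongos (m30999 or m30998).
    var admin = db.getSiblingDB("admin");

    // "Enabling sharding for database [db16] in config db" above:
    admin.runCommand({ enableSharding: "db16" });

    // "CMD: shardcollection: { shardcollection: 'db16.coll16', key: { _id: 'hashed' } }"
    // above; for a hashed key, mongos also creates the supporting
    // { _id: "hashed" } index on the primary shard and registers the
    // initial chunks (numChunks: 2 in the logged metadata event).
    admin.runCommand({ shardCollection: "db16.coll16", key: { _id: "hashed" } });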
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.038-0400 m30999| 2015-07-09T13:56:33.037-0400 I SHARDING [conn1] moving chunk ns: db16.coll16 moving ( ns: db16.coll16, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.039-0400 m31200| 2015-07-09T13:56:33.038-0400 I SHARDING [conn63] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.040-0400 m31200| 2015-07-09T13:56:33.039-0400 I SHARDING [conn63] received moveChunk request: { moveChunk: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5d0ca4787b9985d1c2f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.043-0400 m31200| 2015-07-09T13:56:33.043-0400 I SHARDING [conn63] distributed lock 'db16.coll16/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5d1d5a107a5b9c0daca [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.044-0400 m31200| 2015-07-09T13:56:33.043-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:33.043-0400-559eb5d1d5a107a5b9c0dacb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464593043), what: "moveChunk.start", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.097-0400 m31200| 2015-07-09T13:56:33.096-0400 I SHARDING [conn63] remotely refreshing metadata for db16.coll16 based on current shard version 1|1||559eb5d0ca4787b9985d1c2f, current metadata version is 1|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.098-0400 m31200| 2015-07-09T13:56:33.098-0400 I SHARDING [conn63] metadata of collection db16.coll16 already up to date (shard version : 1|1||559eb5d0ca4787b9985d1c2f, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.098-0400 m31200| 2015-07-09T13:56:33.098-0400 I SHARDING [conn63] moveChunk request accepted at version 1|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.099-0400 m31200| 2015-07-09T13:56:33.098-0400 I SHARDING [conn63] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.099-0400 m31100| 2015-07-09T13:56:33.099-0400 I SHARDING [conn19] remotely refreshing metadata for db16.coll16, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.101-0400 m31100| 2015-07-09T13:56:33.100-0400 I SHARDING [conn19] collection db16.coll16 was previously unsharded, new metadata loaded with shard version 0|0||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.101-0400 m31100| 2015-07-09T13:56:33.100-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5d0ca4787b9985d1c2f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.101-0400 m31100| 2015-07-09T13:56:33.101-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for 
collection db16.coll16 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.103-0400 m31200| 2015-07-09T13:56:33.102-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.107-0400 m31200| 2015-07-09T13:56:33.106-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.112-0400 m31200| 2015-07-09T13:56:33.111-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.119-0400 m31100| 2015-07-09T13:56:33.118-0400 I INDEX [migrateThread] build index on: db16.coll16 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.119-0400 m31100| 2015-07-09T13:56:33.118-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.121-0400 m31200| 2015-07-09T13:56:33.120-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.127-0400 m31100| 2015-07-09T13:56:33.127-0400 I INDEX [migrateThread] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.128-0400 m31100| 2015-07-09T13:56:33.127-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.138-0400 m31200| 2015-07-09T13:56:33.137-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.147-0400 m31100| 2015-07-09T13:56:33.146-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.148-0400 m31100| 2015-07-09T13:56:33.147-0400 I SHARDING [migrateThread] Deleter starting delete for: db16.coll16 from { _id: MinKey } -> { _id: 0 }, with opId: 24049 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.149-0400 m31100| 2015-07-09T13:56:33.148-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db16.coll16 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.160-0400 m31101| 2015-07-09T13:56:33.160-0400 I INDEX [repl writer worker 8] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.161-0400 m31101| 2015-07-09T13:56:33.160-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.163-0400 m31102| 2015-07-09T13:56:33.162-0400 I INDEX [repl writer worker 12] build index on: db16.coll16 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.163-0400 m31102| 2015-07-09T13:56:33.162-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.169-0400 m31101| 2015-07-09T13:56:33.169-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.171-0400 m31200| 2015-07-09T13:56:33.170-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.172-0400 m31100| 2015-07-09T13:56:33.171-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.172-0400 m31100| 2015-07-09T13:56:33.171-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db16.coll16' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.173-0400 m31102| 2015-07-09T13:56:33.173-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.237-0400 m31200| 2015-07-09T13:56:33.236-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.237-0400 m31200| 2015-07-09T13:56:33.237-0400 I SHARDING [conn63] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.238-0400 m31200| 2015-07-09T13:56:33.237-0400 I SHARDING [conn63] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.238-0400 m31200| 2015-07-09T13:56:33.237-0400 I SHARDING [conn63] moveChunk setting version to: 2|0||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.241-0400 m31100| 2015-07-09T13:56:33.241-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db16.coll16' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.241-0400 m31100| 2015-07-09T13:56:33.241-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:33.241-0400-559eb5d1792e00bb6727492a", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464593241), what: "moveChunk.to", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 46, step 2 of 5: 23, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 69, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.295-0400 m31200| 2015-07-09T13:56:33.294-0400 I SHARDING [conn63] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.295-0400 m31200| 2015-07-09T13:56:33.294-0400 I SHARDING [conn63] moveChunk updating self version to: 2|1||559eb5d0ca4787b9985d1c2f through { _id: 0 } -> { _id: MaxKey } for collection 'db16.coll16' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.296-0400 m31200| 2015-07-09T13:56:33.296-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:33.296-0400-559eb5d1d5a107a5b9c0dacc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464593296), what: "moveChunk.commit", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.351-0400 m31200| 2015-07-09T13:56:33.350-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.351-0400 m31200| 2015-07-09T13:56:33.350-0400 I SHARDING [conn63] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.351-0400 m31200| 2015-07-09T13:56:33.350-0400 I SHARDING [conn63] Deleter starting delete for: db16.coll16 from { _id: MinKey } -> { _id: 0 }, with opId: 23194 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:33.351-0400 m31200| 2015-07-09T13:56:33.350-0400 I SHARDING [conn63] rangeDeleter deleted 0 documents for db16.coll16 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.351-0400 m31200| 2015-07-09T13:56:33.351-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.352-0400 m31200| 2015-07-09T13:56:33.351-0400 I SHARDING [conn63] distributed lock 'db16.coll16/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.352-0400 m31200| 2015-07-09T13:56:33.352-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:33.352-0400-559eb5d1d5a107a5b9c0dacd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464593352), what: "moveChunk.from", ns: "db16.coll16", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 135, step 5 of 6: 113, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.406-0400 m31200| 2015-07-09T13:56:33.405-0400 I COMMAND [conn63] command db16.coll16 command: moveChunk { moveChunk: "db16.coll16", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5d0ca4787b9985d1c2f') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 366ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.408-0400 m30999| 2015-07-09T13:56:33.407-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 80 version: 2|1||559eb5d0ca4787b9985d1c2f based on: 1|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.409-0400 m31100| 2015-07-09T13:56:33.408-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db16.coll16", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5d0ca4787b9985d1c2f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.412-0400 m31100| 2015-07-09T13:56:33.412-0400 I SHARDING [conn15] distributed lock 'db16.coll16/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5d1792e00bb6727492b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.412-0400 m31100| 2015-07-09T13:56:33.412-0400 I SHARDING [conn15] remotely refreshing metadata for db16.coll16 based on current shard version 0|0||559eb5d0ca4787b9985d1c2f, current metadata version is 1|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.414-0400 m31100| 2015-07-09T13:56:33.413-0400 I SHARDING [conn15] updating metadata for db16.coll16 from shard version 0|0||559eb5d0ca4787b9985d1c2f to shard version 2|0||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.414-0400 m31100| 2015-07-09T13:56:33.413-0400 I 
SHARDING [conn15] collection version was loaded at version 2|1||559eb5d0ca4787b9985d1c2f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.414-0400 m31100| 2015-07-09T13:56:33.413-0400 I SHARDING [conn15] splitChunk accepted at version 2|0||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.415-0400 m31100| 2015-07-09T13:56:33.415-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:33.415-0400-559eb5d1792e00bb6727492c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464593415), what: "split", ns: "db16.coll16", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5d0ca4787b9985d1c2f') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5d0ca4787b9985d1c2f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.469-0400 m31100| 2015-07-09T13:56:33.469-0400 I SHARDING [conn15] distributed lock 'db16.coll16/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.471-0400 m30999| 2015-07-09T13:56:33.471-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 81 version: 2|3||559eb5d0ca4787b9985d1c2f based on: 2|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.472-0400 m31200| 2015-07-09T13:56:33.471-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db16.coll16", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5d0ca4787b9985d1c2f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.476-0400 m31200| 2015-07-09T13:56:33.475-0400 I SHARDING [conn63] distributed lock 'db16.coll16/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5d1d5a107a5b9c0dace [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.476-0400 m31200| 2015-07-09T13:56:33.475-0400 I SHARDING [conn63] remotely refreshing metadata for db16.coll16 based on current shard version 2|0||559eb5d0ca4787b9985d1c2f, current metadata version is 2|0||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.477-0400 m31200| 2015-07-09T13:56:33.477-0400 I SHARDING [conn63] updating metadata for db16.coll16 from shard version 2|0||559eb5d0ca4787b9985d1c2f to shard version 2|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.477-0400 m31200| 2015-07-09T13:56:33.477-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eb5d0ca4787b9985d1c2f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.477-0400 m31200| 2015-07-09T13:56:33.477-0400 I SHARDING [conn63] splitChunk accepted at version 2|1||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.479-0400 m31200| 2015-07-09T13:56:33.478-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:33.478-0400-559eb5d1d5a107a5b9c0dacf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464593478), what: "split", ns: "db16.coll16", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb5d0ca4787b9985d1c2f') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5d0ca4787b9985d1c2f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.532-0400 m31200| 2015-07-09T13:56:33.532-0400 I SHARDING [conn63] distributed lock 'db16.coll16/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.534-0400 m30999| 2015-07-09T13:56:33.534-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 82 version: 2|5||559eb5d0ca4787b9985d1c2f based on: 2|3||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.545-0400 m31200| 2015-07-09T13:56:33.544-0400 I INDEX [conn39] build index on: db16.coll16 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.545-0400 m31200| 2015-07-09T13:56:33.544-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.545-0400 m31100| 2015-07-09T13:56:33.544-0400 I INDEX [conn47] build index on: db16.coll16 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.545-0400 m31100| 2015-07-09T13:56:33.544-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.551-0400 m31200| 2015-07-09T13:56:33.551-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.555-0400 m31100| 2015-07-09T13:56:33.554-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.558-0400 m31201| 2015-07-09T13:56:33.557-0400 I INDEX [repl writer worker 11] build index on: db16.coll16 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.558-0400 m31201| 2015-07-09T13:56:33.557-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.568-0400 m31202| 2015-07-09T13:56:33.568-0400 I INDEX [repl writer worker 11] build index on: db16.coll16 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.568-0400 m31202| 2015-07-09T13:56:33.568-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.568-0400 m31102| 2015-07-09T13:56:33.568-0400 I INDEX [repl writer worker 13] build index on: db16.coll16 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.569-0400 m31102| 2015-07-09T13:56:33.568-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.573-0400 m31200| 2015-07-09T13:56:33.572-0400 I INDEX [conn39] build index on: db16.coll16 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.573-0400 m31200| 2015-07-09T13:56:33.572-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.573-0400 m31100| 2015-07-09T13:56:33.572-0400 I INDEX [conn47] build index on: db16.coll16 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: 
"db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.574-0400 m31100| 2015-07-09T13:56:33.572-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.581-0400 m31201| 2015-07-09T13:56:33.581-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.581-0400 m31100| 2015-07-09T13:56:33.581-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.582-0400 m31200| 2015-07-09T13:56:33.581-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.586-0400 m31202| 2015-07-09T13:56:33.585-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.591-0400 m31102| 2015-07-09T13:56:33.591-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.593-0400 m31101| 2015-07-09T13:56:33.591-0400 I INDEX [repl writer worker 4] build index on: db16.coll16 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.593-0400 m31200| 2015-07-09T13:56:33.591-0400 I INDEX [conn39] build index on: db16.coll16 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.594-0400 m31101| 2015-07-09T13:56:33.591-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.594-0400 m31200| 2015-07-09T13:56:33.591-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.594-0400 m31100| 2015-07-09T13:56:33.593-0400 I INDEX [conn47] build index on: db16.coll16 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.595-0400 m31100| 2015-07-09T13:56:33.593-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.603-0400 m31200| 2015-07-09T13:56:33.603-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.604-0400 m31101| 2015-07-09T13:56:33.603-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.605-0400 m31202| 2015-07-09T13:56:33.604-0400 I INDEX [repl writer worker 15] build index on: db16.coll16 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.605-0400 m31202| 2015-07-09T13:56:33.604-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.608-0400 m31100| 2015-07-09T13:56:33.608-0400 I INDEX [conn47] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.609-0400 m31201| 2015-07-09T13:56:33.608-0400 I INDEX [repl writer worker 6] build index on: db16.coll16 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.609-0400 m31201| 2015-07-09T13:56:33.608-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.613-0400 m31102| 2015-07-09T13:56:33.612-0400 I INDEX [repl writer worker 9] build index on: db16.coll16 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.613-0400 m31102| 2015-07-09T13:56:33.612-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.615-0400 m31101| 2015-07-09T13:56:33.614-0400 I INDEX [repl writer worker 5] build index on: db16.coll16 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.615-0400 m31101| 2015-07-09T13:56:33.614-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.617-0400 m31201| 2015-07-09T13:56:33.616-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.618-0400 m31202| 2015-07-09T13:56:33.616-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.621-0400 m31100| 2015-07-09T13:56:33.620-0400 I INDEX [conn47] build index on: db16.coll16 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.621-0400 m31100| 2015-07-09T13:56:33.620-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.630-0400 m31101| 2015-07-09T13:56:33.630-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.632-0400 m31102| 2015-07-09T13:56:33.631-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.632-0400 m31201| 2015-07-09T13:56:33.632-0400 I INDEX [repl writer worker 1] build index on: db16.coll16 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.632-0400 m31201| 2015-07-09T13:56:33.632-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.632-0400 m31200| 2015-07-09T13:56:33.632-0400 I INDEX [conn39] build index on: db16.coll16 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.633-0400 m31200| 2015-07-09T13:56:33.632-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.637-0400 m31202| 2015-07-09T13:56:33.637-0400 I INDEX [repl writer worker 7] build index on: db16.coll16 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.637-0400 m31202| 2015-07-09T13:56:33.637-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.642-0400 m31100| 2015-07-09T13:56:33.642-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.644-0400 m31101| 2015-07-09T13:56:33.644-0400 I INDEX [repl writer worker 9] build index on: db16.coll16 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.644-0400 m31101| 2015-07-09T13:56:33.644-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.646-0400 m31200| 2015-07-09T13:56:33.646-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.646-0400 m31201| 2015-07-09T13:56:33.646-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.652-0400 m31102| 2015-07-09T13:56:33.650-0400 I INDEX [repl writer worker 15] build index on: db16.coll16 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.652-0400 m31102| 2015-07-09T13:56:33.650-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.652-0400 m31202| 2015-07-09T13:56:33.652-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.653-0400 m31101| 2015-07-09T13:56:33.652-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.655-0400 m31201| 2015-07-09T13:56:33.654-0400 I INDEX [repl writer worker 4] build index on: db16.coll16 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.655-0400 m31201| 2015-07-09T13:56:33.654-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.661-0400 m31102| 2015-07-09T13:56:33.661-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.689-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.690-0400 m31202| 2015-07-09T13:56:33.672-0400 I INDEX [repl writer worker 10] build index on: db16.coll16 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.690-0400 m31202| 2015-07-09T13:56:33.672-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.695-0400 m31101| 2015-07-09T13:56:33.694-0400 I INDEX [repl writer worker 13] build index on: db16.coll16 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.695-0400 m31101| 2015-07-09T13:56:33.694-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.700-0400 m31102| 2015-07-09T13:56:33.699-0400 I INDEX [repl writer worker 11] build index on: db16.coll16 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db16.coll16" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.700-0400 m31102| 2015-07-09T13:56:33.699-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.715-0400 m31201| 2015-07-09T13:56:33.715-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.716-0400 m31202| 2015-07-09T13:56:33.715-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.740-0400 m31101| 2015-07-09T13:56:33.737-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.743-0400 m31102| 2015-07-09T13:56:33.742-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.748-0400 m30999| 2015-07-09T13:56:33.748-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62940 #105 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.771-0400 m30998| 2015-07-09T13:56:33.770-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62941 #104 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.783-0400 m30998| 2015-07-09T13:56:33.783-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62942 #105 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.783-0400 m30999| 2015-07-09T13:56:33.783-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62944 #106 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.784-0400 m30998| 2015-07-09T13:56:33.784-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62943 #106 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.785-0400 m30998| 2015-07-09T13:56:33.784-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62945 #107 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.785-0400 m30999| 2015-07-09T13:56:33.785-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62946 #107 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.786-0400 m30999| 2015-07-09T13:56:33.786-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62947 #108 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.786-0400 m30998| 2015-07-09T13:56:33.786-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62948 #108 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.788-0400 m30999| 2015-07-09T13:56:33.788-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62949 #109 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.797-0400 setting random seed: 7213572179898 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.797-0400 setting random seed: 8507684008218 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.798-0400 setting random seed: 1155222961679 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.798-0400 setting random seed: 3972762040793 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.799-0400 setting random seed: 5832325979135 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.800-0400 setting random seed: 3784801284782 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.802-0400 setting random seed: 3902776981703 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.806-0400 m30998| 2015-07-09T13:56:33.804-0400 I SHARDING [conn104] ChunkManager: time to load chunks for db16.coll16: 0ms sequenceNumber: 18 version: 2|5||559eb5d0ca4787b9985d1c2f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.806-0400 setting random seed: 211650715209 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.807-0400 setting random seed: 2554925912991 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.815-0400 setting random seed: 639839638024 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.904-0400 m30998| 2015-07-09T13:56:33.903-0400 I NETWORK [conn104] end connection 127.0.0.1:62941 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.909-0400 m30999| 2015-07-09T13:56:33.909-0400 I NETWORK 
[conn108] end connection 127.0.0.1:62947 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.921-0400 m30998| 2015-07-09T13:56:33.920-0400 I NETWORK [conn107] end connection 127.0.0.1:62945 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.935-0400 m30999| 2015-07-09T13:56:33.934-0400 I NETWORK [conn109] end connection 127.0.0.1:62949 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.940-0400 m30999| 2015-07-09T13:56:33.940-0400 I NETWORK [conn106] end connection 127.0.0.1:62944 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.951-0400 m30998| 2015-07-09T13:56:33.950-0400 I NETWORK [conn105] end connection 127.0.0.1:62942 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.978-0400 m30998| 2015-07-09T13:56:33.975-0400 I NETWORK [conn106] end connection 127.0.0.1:62943 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.978-0400 m30999| 2015-07-09T13:56:33.977-0400 I NETWORK [conn105] end connection 127.0.0.1:62940 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.981-0400 m30998| 2015-07-09T13:56:33.981-0400 I NETWORK [conn108] end connection 127.0.0.1:62948 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:33.984-0400 m30999| 2015-07-09T13:56:33.983-0400 I NETWORK [conn107] end connection 127.0.0.1:62946 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 jstests/concurrency/fsm_workloads/update_replace.js: Workload completed in 339 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 m30999| 2015-07-09T13:56:34.001-0400 I COMMAND [conn1] DROP: db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.001-0400 m30999| 2015-07-09T13:56:34.001-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.001-0400-559eb5d2ca4787b9985d1c31", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464594001), what: "dropCollection.start", ns: "db16.coll16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.057-0400 m30999| 2015-07-09T13:56:34.057-0400 I SHARDING [conn1] distributed lock 'db16.coll16/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5d2ca4787b9985d1c32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.058-0400 m31100| 2015-07-09T13:56:34.058-0400 I COMMAND [conn15] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.062-0400 m31200| 2015-07-09T13:56:34.062-0400 I COMMAND [conn63] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.063-0400 m31101| 2015-07-09T13:56:34.062-0400 I COMMAND [repl writer worker 10] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.064-0400 m31102| 2015-07-09T13:56:34.064-0400 I COMMAND [repl writer worker 8] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.066-0400 m31202| 2015-07-09T13:56:34.066-0400 I COMMAND [repl writer worker 
8] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.067-0400 m31201| 2015-07-09T13:56:34.067-0400 I COMMAND [repl writer worker 3] CMD: drop db16.coll16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.118-0400 m31100| 2015-07-09T13:56:34.118-0400 I SHARDING [conn15] remotely refreshing metadata for db16.coll16 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5d0ca4787b9985d1c2f, current metadata version is 2|3||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.120-0400 m31100| 2015-07-09T13:56:34.119-0400 W SHARDING [conn15] no chunks found when reloading db16.coll16, previous version was 0|0||559eb5d0ca4787b9985d1c2f, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.120-0400 m31100| 2015-07-09T13:56:34.119-0400 I SHARDING [conn15] dropping metadata for db16.coll16 at shard version 2|3||559eb5d0ca4787b9985d1c2f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.121-0400 m31200| 2015-07-09T13:56:34.120-0400 I SHARDING [conn63] remotely refreshing metadata for db16.coll16 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5d0ca4787b9985d1c2f, current metadata version is 2|5||559eb5d0ca4787b9985d1c2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.122-0400 m31200| 2015-07-09T13:56:34.122-0400 W SHARDING [conn63] no chunks found when reloading db16.coll16, previous version was 0|0||559eb5d0ca4787b9985d1c2f, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.122-0400 m31200| 2015-07-09T13:56:34.122-0400 I SHARDING [conn63] dropping metadata for db16.coll16 at shard version 2|5||559eb5d0ca4787b9985d1c2f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.123-0400 m30999| 2015-07-09T13:56:34.123-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.123-0400-559eb5d2ca4787b9985d1c33", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464594123), what: "dropCollection", ns: "db16.coll16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.176-0400 m30999| 2015-07-09T13:56:34.176-0400 I SHARDING [conn1] distributed lock 'db16.coll16/bs-osx108-8:30999:1436464534:16807' unlocked. 
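For readers tracing the drop sequence above: all of those distributed-lock, dropCollection.start, and per-shard "dropping metadata" lines fan out from a single drop issued through one of the mongos routers by the harness's teardown. A minimal sketch of that call (host and port assumed from this log; the actual teardown helper is not shown in this excerpt):

// Connect to a mongos started by the test (port assumed from the m30999 lines).
var conn = new Mongo("localhost:30999");
var db16 = conn.getDB("db16");
// One drop from mongos takes the 'db16.coll16' distributed lock on the config
// servers, logs dropCollection.start, tells each shard to drop its chunk
// metadata, and logs dropCollection on success -- exactly the trace above.
assert(db16.coll16.drop());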
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.232-0400 m30999| 2015-07-09T13:56:34.232-0400 I COMMAND [conn1] DROP DATABASE: db16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.232-0400 m30999| 2015-07-09T13:56:34.232-0400 I SHARDING [conn1] DBConfig::dropDatabase: db16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.232-0400 m30999| 2015-07-09T13:56:34.232-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.232-0400-559eb5d2ca4787b9985d1c34", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464594232), what: "dropDatabase.start", ns: "db16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.338-0400 m30999| 2015-07-09T13:56:34.338-0400 I SHARDING [conn1] DBConfig::dropDatabase: db16 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.339-0400 m31200| 2015-07-09T13:56:34.339-0400 I COMMAND [conn66] dropDatabase db16 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.339-0400 m31200| 2015-07-09T13:56:34.339-0400 I COMMAND [conn66] dropDatabase db16 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.339-0400 m30999| 2015-07-09T13:56:34.339-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.339-0400-559eb5d2ca4787b9985d1c35", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464594339), what: "dropDatabase", ns: "db16", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.340-0400 m31202| 2015-07-09T13:56:34.340-0400 I COMMAND [repl writer worker 0] dropDatabase db16 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.340-0400 m31201| 2015-07-09T13:56:34.340-0400 I COMMAND [repl writer worker 1] dropDatabase db16 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.340-0400 m31202| 2015-07-09T13:56:34.340-0400 I COMMAND [repl writer worker 0] dropDatabase db16 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.340-0400 m31201| 2015-07-09T13:56:34.340-0400 I COMMAND [repl writer worker 1] dropDatabase db16 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.427-0400 m31100| 2015-07-09T13:56:34.426-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.430-0400 m31102| 2015-07-09T13:56:34.430-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.430-0400 m31101| 2015-07-09T13:56:34.430-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.459-0400 m31200| 2015-07-09T13:56:34.459-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.461-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.461-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.462-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.462-0400 jstests/concurrency/fsm_workloads/map_reduce_replace.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.462-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.462-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.462-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.463-0400 m31202| 2015-07-09T13:56:34.462-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:34.463-0400 m31201| 2015-07-09T13:56:34.463-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.467-0400 m30999| 2015-07-09T13:56:34.467-0400 I SHARDING [conn1] distributed lock 'db17/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5d2ca4787b9985d1c36 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.471-0400 m30999| 2015-07-09T13:56:34.471-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:34.469-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.471-0400 m30999| 2015-07-09T13:56:34.471-0400 I SHARDING [conn1] Placing [db17] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.472-0400 m30999| 2015-07-09T13:56:34.471-0400 I SHARDING [conn1] Enabling sharding for database [db17] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.525-0400 m30999| 2015-07-09T13:56:34.525-0400 I SHARDING [conn1] distributed lock 'db17/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.548-0400 m31200| 2015-07-09T13:56:34.547-0400 I INDEX [conn20] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.548-0400 m31200| 2015-07-09T13:56:34.547-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.557-0400 m31200| 2015-07-09T13:56:34.557-0400 I INDEX [conn20] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.560-0400 m30999| 2015-07-09T13:56:34.558-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db17.coll17", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.562-0400 m30999| 2015-07-09T13:56:34.561-0400 I SHARDING [conn1] distributed lock 'db17.coll17/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5d2ca4787b9985d1c37 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.563-0400 m30999| 2015-07-09T13:56:34.563-0400 I SHARDING [conn1] enable sharding on: db17.coll17 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.564-0400 m30999| 2015-07-09T13:56:34.563-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.563-0400-559eb5d2ca4787b9985d1c38", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464594563), what: "shardCollection.start", ns: "db17.coll17", details: { shardKey: { _id: "hashed" }, collection: "db17.coll17", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.567-0400 m31201| 2015-07-09T13:56:34.567-0400 I INDEX [repl writer worker 12] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.568-0400 m31201| 2015-07-09T13:56:34.567-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.573-0400 m31202| 2015-07-09T13:56:34.572-0400 I INDEX [repl writer worker 6] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, 
name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.573-0400 m31202| 2015-07-09T13:56:34.572-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.575-0400 m31201| 2015-07-09T13:56:34.575-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.577-0400 m31202| 2015-07-09T13:56:34.577-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.616-0400 m30999| 2015-07-09T13:56:34.616-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db17.coll17 using new epoch 559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.722-0400 m30999| 2015-07-09T13:56:34.721-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 83 version: 1|1||559eb5d2ca4787b9985d1c39 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.777-0400 m30999| 2015-07-09T13:56:34.777-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 84 version: 1|1||559eb5d2ca4787b9985d1c39 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.779-0400 m31200| 2015-07-09T13:56:34.778-0400 I SHARDING [conn39] remotely refreshing metadata for db17.coll17 with requested shard version 1|1||559eb5d2ca4787b9985d1c39, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.780-0400 m31200| 2015-07-09T13:56:34.780-0400 I SHARDING [conn39] collection db17.coll17 was previously unsharded, new metadata loaded with shard version 1|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.780-0400 m31200| 2015-07-09T13:56:34.780-0400 I SHARDING [conn39] collection version was loaded at version 1|1||559eb5d2ca4787b9985d1c39, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.781-0400 m30999| 2015-07-09T13:56:34.780-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.780-0400-559eb5d2ca4787b9985d1c3a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464594780), what: "shardCollection", ns: "db17.coll17", details: { version: "1|1||559eb5d2ca4787b9985d1c39" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.836-0400 m30999| 2015-07-09T13:56:34.836-0400 I SHARDING [conn1] distributed lock 'db17.coll17/bs-osx108-8:30999:1436464534:16807' unlocked. 
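The db17.coll17 setup lines above are the standard hashed-sharding bootstrap. A sketch of the admin commands that produce them (database and collection names taken from the log; the exact jstest helper the workload uses is not shown in this excerpt):

var admin = new Mongo("localhost:30999").getDB("admin");
// Enable sharding for db17 (the log shows mongos placing it on test-rs1),
// then shard the collection on a hashed _id key. With a hashed key, mongos
// pre-creates the initial chunks itself -- the numChunks: 2 in the
// shardCollection.start event above.
assert.commandWorked(admin.runCommand({ enableSharding: "db17" }));
assert.commandWorked(admin.runCommand({
    shardCollection: "db17.coll17",
    key: { _id: "hashed" }
}));

The moveChunk and splitChunk traffic that follows is mongos spreading those initial chunks across test-rs0 and test-rs1 and splitting them at the fixed hash boundaries +/-4611686018427387902 seen in the split events.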
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.837-0400 m30999| 2015-07-09T13:56:34.837-0400 I SHARDING [conn1] moving chunk ns: db17.coll17 moving ( ns: db17.coll17, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.837-0400 m31200| 2015-07-09T13:56:34.837-0400 I SHARDING [conn63] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.838-0400 m31200| 2015-07-09T13:56:34.838-0400 I SHARDING [conn63] received moveChunk request: { moveChunk: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5d2ca4787b9985d1c39') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.842-0400 m31200| 2015-07-09T13:56:34.841-0400 I SHARDING [conn63] distributed lock 'db17.coll17/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5d2d5a107a5b9c0dad1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.842-0400 m31200| 2015-07-09T13:56:34.841-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.841-0400-559eb5d2d5a107a5b9c0dad2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464594841), what: "moveChunk.start", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.895-0400 m31200| 2015-07-09T13:56:34.894-0400 I SHARDING [conn63] remotely refreshing metadata for db17.coll17 based on current shard version 1|1||559eb5d2ca4787b9985d1c39, current metadata version is 1|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.896-0400 m31200| 2015-07-09T13:56:34.896-0400 I SHARDING [conn63] metadata of collection db17.coll17 already up to date (shard version : 1|1||559eb5d2ca4787b9985d1c39, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.897-0400 m31200| 2015-07-09T13:56:34.896-0400 I SHARDING [conn63] moveChunk request accepted at version 1|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.897-0400 m31200| 2015-07-09T13:56:34.897-0400 I SHARDING [conn63] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.897-0400 m31100| 2015-07-09T13:56:34.897-0400 I SHARDING [conn19] remotely refreshing metadata for db17.coll17, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.899-0400 m31100| 2015-07-09T13:56:34.898-0400 I SHARDING [conn19] collection db17.coll17 was previously unsharded, new metadata loaded with shard version 0|0||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.899-0400 m31100| 2015-07-09T13:56:34.899-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5d2ca4787b9985d1c39, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.899-0400 m31100| 2015-07-09T13:56:34.899-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for 
collection db17.coll17 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.901-0400 m31200| 2015-07-09T13:56:34.901-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.905-0400 m31200| 2015-07-09T13:56:34.904-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.910-0400 m31200| 2015-07-09T13:56:34.909-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.913-0400 m31100| 2015-07-09T13:56:34.913-0400 I INDEX [migrateThread] build index on: db17.coll17 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.914-0400 m31100| 2015-07-09T13:56:34.913-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.919-0400 m31200| 2015-07-09T13:56:34.919-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.922-0400 m31100| 2015-07-09T13:56:34.921-0400 I INDEX [migrateThread] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.922-0400 m31100| 2015-07-09T13:56:34.921-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.932-0400 m31100| 2015-07-09T13:56:34.931-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.932-0400 m31100| 2015-07-09T13:56:34.932-0400 I SHARDING [migrateThread] Deleter starting delete for: db17.coll17 from { _id: MinKey } -> { _id: 0 }, with opId: 24218 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.932-0400 m31100| 2015-07-09T13:56:34.932-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db17.coll17 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.937-0400 m31102| 2015-07-09T13:56:34.936-0400 I INDEX [repl writer worker 0] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.938-0400 m31102| 2015-07-09T13:56:34.936-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.938-0400 m31200| 2015-07-09T13:56:34.937-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.942-0400 m31102| 2015-07-09T13:56:34.941-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.943-0400 m31100| 2015-07-09T13:56:34.943-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.944-0400 m31100| 2015-07-09T13:56:34.943-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db17.coll17' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.946-0400 m31101| 2015-07-09T13:56:34.946-0400 I INDEX [repl writer worker 12] build index on: db17.coll17 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db17.coll17" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.946-0400 m31101| 2015-07-09T13:56:34.946-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.952-0400 m31101| 2015-07-09T13:56:34.952-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.971-0400 m31200| 2015-07-09T13:56:34.971-0400 I SHARDING [conn63] moveChunk data transfer progress: { active: true, ns: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.971-0400 m31200| 2015-07-09T13:56:34.971-0400 I SHARDING [conn63] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.972-0400 m31200| 2015-07-09T13:56:34.971-0400 I SHARDING [conn63] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.972-0400 m31200| 2015-07-09T13:56:34.972-0400 I SHARDING [conn63] moveChunk setting version to: 2|0||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.977-0400 m31100| 2015-07-09T13:56:34.977-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db17.coll17' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:34.978-0400 m31100| 2015-07-09T13:56:34.977-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:34.977-0400-559eb5d2792e00bb6727492d", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464594977), what: "moveChunk.to", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 32, step 2 of 5: 10, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 34, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.031-0400 m31200| 2015-07-09T13:56:35.030-0400 I SHARDING [conn63] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.031-0400 m31200| 2015-07-09T13:56:35.030-0400 I SHARDING [conn63] moveChunk updating self version to: 2|1||559eb5d2ca4787b9985d1c39 through { _id: 0 } -> { _id: MaxKey } for collection 'db17.coll17' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.032-0400 m31200| 2015-07-09T13:56:35.031-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:35.031-0400-559eb5d3d5a107a5b9c0dad3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464595031), what: "moveChunk.commit", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.085-0400 m31200| 2015-07-09T13:56:35.084-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.227-0400 m31200| 2015-07-09T13:56:35.085-0400 I SHARDING [conn63] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.227-0400 m31200| 2015-07-09T13:56:35.085-0400 I SHARDING [conn63] Deleter starting delete for: db17.coll17 from { _id: MinKey } -> { _id: 0 }, with opId: 23525 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:35.227-0400 m31200| 2015-07-09T13:56:35.085-0400 I SHARDING [conn63] rangeDeleter deleted 0 documents for db17.coll17 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.227-0400 m31200| 2015-07-09T13:56:35.085-0400 I SHARDING [conn63] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.227-0400 m31200| 2015-07-09T13:56:35.086-0400 I SHARDING [conn63] distributed lock 'db17.coll17/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.228-0400 m31200| 2015-07-09T13:56:35.086-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:35.086-0400-559eb5d3d5a107a5b9c0dad4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464595086), what: "moveChunk.from", ns: "db17.coll17", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 113, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.228-0400 m31200| 2015-07-09T13:56:35.140-0400 I COMMAND [conn63] command db17.coll17 command: moveChunk { moveChunk: "db17.coll17", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5d2ca4787b9985d1c39') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.228-0400 m30999| 2015-07-09T13:56:35.142-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 85 version: 2|1||559eb5d2ca4787b9985d1c39 based on: 1|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.228-0400 m31100| 2015-07-09T13:56:35.143-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db17.coll17", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5d2ca4787b9985d1c39') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.229-0400 m31100| 2015-07-09T13:56:35.146-0400 I SHARDING [conn15] distributed lock 'db17.coll17/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5d3792e00bb6727492e [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.229-0400 m31100| 2015-07-09T13:56:35.146-0400 I SHARDING [conn15] remotely refreshing metadata for db17.coll17 based on current shard version 0|0||559eb5d2ca4787b9985d1c39, current metadata version is 1|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.229-0400 m31100| 2015-07-09T13:56:35.148-0400 I SHARDING [conn15] updating metadata for db17.coll17 from shard version 0|0||559eb5d2ca4787b9985d1c39 to shard version 2|0||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.229-0400 m31100| 2015-07-09T13:56:35.148-0400 I 
SHARDING [conn15] collection version was loaded at version 2|1||559eb5d2ca4787b9985d1c39, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.229-0400 m31100| 2015-07-09T13:56:35.148-0400 I SHARDING [conn15] splitChunk accepted at version 2|0||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.230-0400 m31100| 2015-07-09T13:56:35.149-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:35.149-0400-559eb5d3792e00bb6727492f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464595149), what: "split", ns: "db17.coll17", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5d2ca4787b9985d1c39') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5d2ca4787b9985d1c39') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.230-0400 m31100| 2015-07-09T13:56:35.203-0400 I SHARDING [conn15] distributed lock 'db17.coll17/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.231-0400 m30999| 2015-07-09T13:56:35.205-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 86 version: 2|3||559eb5d2ca4787b9985d1c39 based on: 2|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.231-0400 m31200| 2015-07-09T13:56:35.205-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db17.coll17", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5d2ca4787b9985d1c39') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.232-0400 m31200| 2015-07-09T13:56:35.209-0400 I SHARDING [conn63] distributed lock 'db17.coll17/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5d3d5a107a5b9c0dad5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.232-0400 m31200| 2015-07-09T13:56:35.209-0400 I SHARDING [conn63] remotely refreshing metadata for db17.coll17 based on current shard version 2|0||559eb5d2ca4787b9985d1c39, current metadata version is 2|0||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.232-0400 m31200| 2015-07-09T13:56:35.211-0400 I SHARDING [conn63] updating metadata for db17.coll17 from shard version 2|0||559eb5d2ca4787b9985d1c39 to shard version 2|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.233-0400 m31200| 2015-07-09T13:56:35.211-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eb5d2ca4787b9985d1c39, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.233-0400 m31200| 2015-07-09T13:56:35.211-0400 I SHARDING [conn63] splitChunk accepted at version 2|1||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.234-0400 m31200| 2015-07-09T13:56:35.212-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:35.212-0400-559eb5d3d5a107a5b9c0dad6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464595212), what: "split", ns: "db17.coll17", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb5d2ca4787b9985d1c39') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5d2ca4787b9985d1c39') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.266-0400 m31200| 2015-07-09T13:56:35.266-0400 I SHARDING [conn63] distributed lock 'db17.coll17/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.268-0400 m30999| 2015-07-09T13:56:35.268-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 87 version: 2|5||559eb5d2ca4787b9985d1c39 based on: 2|3||559eb5d2ca4787b9985d1c39 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.498-0400 m31200| 2015-07-09T13:56:35.497-0400 I COMMAND [conn51] command db17.$cmd command: insert { insert: "coll17", documents: 480, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eb5d2ca4787b9985d1c39') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 488, w: 488 } }, Database: { acquireCount: { w: 488 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 480 } }, oplog: { acquireCount: { w: 480 } } } protocol:op_command 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.507-0400 m31100| 2015-07-09T13:56:35.506-0400 I COMMAND [conn16] command db17.$cmd command: insert { insert: "coll17", documents: 520, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eb5d2ca4787b9985d1c39') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 528, w: 528 } }, Database: { acquireCount: { w: 528 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 520 } }, oplog: { acquireCount: { w: 520 } } } protocol:op_command 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.664-0400 m31200| 2015-07-09T13:56:35.663-0400 I COMMAND [conn51] command db17.$cmd command: insert { insert: "coll17", documents: 501, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eb5d2ca4787b9985d1c39') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 508, w: 508 } }, Database: { acquireCount: { w: 508 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } protocol:op_command 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.670-0400 m31100| 2015-07-09T13:56:35.669-0400 I COMMAND [conn16] command db17.$cmd command: insert { insert: "coll17", documents: 499, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eb5d2ca4787b9985d1c39') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 507, w: 507 } }, Database: { acquireCount: { w: 507 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 499 } }, oplog: { acquireCount: { w: 499 } } } protocol:op_command 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.671-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.720-0400 m30998| 2015-07-09T13:56:35.720-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62952 #109 (2 connections now open) 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.724-0400 m30999| 2015-07-09T13:56:35.724-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62953 #110 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.727-0400 m30999| 2015-07-09T13:56:35.727-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62954 #111 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.732-0400 m30998| 2015-07-09T13:56:35.732-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62955 #110 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.732-0400 m30998| 2015-07-09T13:56:35.732-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62956 #111 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.741-0400 setting random seed: 7463727653957 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.742-0400 setting random seed: 8186543001793 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.742-0400 setting random seed: 2870738483034 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.742-0400 setting random seed: 1228295210748 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.742-0400 setting random seed: 8501706817187 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.744-0400 m30998| 2015-07-09T13:56:35.744-0400 I SHARDING [conn109] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 19 version: 2|5||559eb5d2ca4787b9985d1c39 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.847-0400 m31100| 2015-07-09T13:56:35.847-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_91 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.850-0400 m31200| 2015-07-09T13:56:35.850-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_45 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.851-0400 m31200| 2015-07-09T13:56:35.850-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_46 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.864-0400 m31100| 2015-07-09T13:56:35.863-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_90 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.864-0400 m31100| 2015-07-09T13:56:35.863-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_92 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.886-0400 m30998| 2015-07-09T13:56:35.885-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:35.877-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.897-0400 m31100| 2015-07-09T13:56:35.897-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_94 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.898-0400 m31100| 2015-07-09T13:56:35.897-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_93 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.924-0400 m31200| 2015-07-09T13:56:35.923-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.925-0400 m31200| 2015-07-09T13:56:35.924-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:35.928-0400 m31200| 2015-07-09T13:56:35.926-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.420-0400 m31200| 2015-07-09T13:56:36.419-0400 I 
COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.423-0400 m31200| 2015-07-09T13:56:36.423-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_45 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.423-0400 m31200| 2015-07-09T13:56:36.423-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_45 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.425-0400 m31200| 2015-07-09T13:56:36.425-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_45 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.428-0400 m31200| 2015-07-09T13:56:36.427-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464595_18 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.428-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.428-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.428-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.430-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_18", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:212 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1126 } }, Database: { acquireCount: { r: 26, w: 66, R: 17, W: 11 }, acquireWaitCount: { w: 13, R: 7, W: 6 }, timeAcquiringMicros: { w: 111215, R: 107251, W: 34752 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 605ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.434-0400 m31200| 2015-07-09T13:56:36.434-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.442-0400 m31200| 2015-07-09T13:56:36.442-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_46 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.442-0400 m31200| 2015-07-09T13:56:36.442-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_46 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.443-0400 m31200| 2015-07-09T13:56:36.443-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_46 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.447-0400 m31200| 2015-07-09T13:56:36.447-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464595_19 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.448-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.448-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.448-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.449-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_19", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4832, W: 457 } }, Database: { acquireCount: { r: 26, w: 66, R: 18, W: 11 }, 
acquireWaitCount: { r: 3, w: 10, R: 6, W: 7 }, timeAcquiringMicros: { r: 1837, w: 54601, R: 121873, W: 41017 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 615ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.451-0400 m31200| 2015-07-09T13:56:36.450-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464595_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.456-0400 m31200| 2015-07-09T13:56:36.455-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.457-0400 m31200| 2015-07-09T13:56:36.456-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.460-0400 m31200| 2015-07-09T13:56:36.460-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_47 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.462-0400 m31200| 2015-07-09T13:56:36.462-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464595_27 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.463-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.463-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.464-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.465-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_27", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 8205, w: 3903, W: 385 } }, Database: { acquireCount: { r: 26, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 7, w: 8, R: 7, W: 9 }, timeAcquiringMicros: { r: 42403, w: 46148, R: 9038, W: 94817 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 610ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.465-0400 m31200| 2015-07-09T13:56:36.464-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.469-0400 m31200| 2015-07-09T13:56:36.469-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.470-0400 m31200| 2015-07-09T13:56:36.469-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.470-0400 m31200| 2015-07-09T13:56:36.470-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_48 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.472-0400 m31200| 2015-07-09T13:56:36.471-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464595_28 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.472-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.472-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.472-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.473-0400 m31200| values...., query: { key: { $exists: true }, 
value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_28", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:212 locks:{ Global: { acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 15089, w: 4993, W: 1695 } }, Database: { acquireCount: { r: 26, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 8, w: 10, R: 9, W: 9 }, timeAcquiringMicros: { r: 44164, w: 58297, R: 57689, W: 31276 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 619ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.476-0400 m31200| 2015-07-09T13:56:36.476-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.479-0400 m31200| 2015-07-09T13:56:36.479-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.479-0400 m31200| 2015-07-09T13:56:36.479-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.480-0400 m31200| 2015-07-09T13:56:36.480-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_49 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.480-0400 m31200| 2015-07-09T13:56:36.480-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464595_20 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.481-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.481-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.481-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.481-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_20", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 11669, w: 12405 } }, Database: { acquireCount: { r: 26, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 6, w: 14, R: 9, W: 5 }, timeAcquiringMicros: { r: 36009, w: 42624, R: 54136, W: 87544 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 626ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.796-0400 m31100| 2015-07-09T13:56:36.796-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:36.793-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.935-0400 m31100| 2015-07-09T13:56:36.935-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464595_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.939-0400 m31100| 2015-07-09T13:56:36.939-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_91 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.939-0400 m31100| 2015-07-09T13:56:36.939-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_91 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.940-0400 m31100| 
2015-07-09T13:56:36.940-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_91 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.942-0400 m31100| 2015-07-09T13:56:36.942-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.942-0400 m31100| 2015-07-09T13:56:36.942-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464595_27 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.942-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.942-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.943-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.944-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_27", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:212 locks:{ Global: { acquireCount: { r: 181, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 615 } }, Database: { acquireCount: { r: 26, w: 66, R: 26, W: 11 }, acquireWaitCount: { w: 18, R: 6, W: 5 }, timeAcquiringMicros: { w: 235095, R: 122366, W: 16390 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1118ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.944-0400 m31200| 2015-07-09T13:56:36.944-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.949-0400 m31100| 2015-07-09T13:56:36.948-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_90 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.950-0400 m31100| 2015-07-09T13:56:36.948-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_90 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.950-0400 m31100| 2015-07-09T13:56:36.950-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_90 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.952-0400 m31100| 2015-07-09T13:56:36.952-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464595_18 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.952-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.953-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.953-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.954-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_18", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:20 reslen:212 locks:{ Global: { acquireCount: { r: 193, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4674, W: 119 } }, Database: { acquireCount: { r: 26, w: 66, R: 32, W: 11 }, acquireWaitCount: { r: 4, w: 12, R: 7, W: 6 }, timeAcquiringMicros: { r: 14304, w: 94691, R: 104347, W: 29625 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1131ms 
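The first-pass entries above show each shard (m31100 for test-rs0, m31200 for test-rs1) running the workload's mapReduce with shardedFirstPass: true into a per-shard temporary collection (tmp.mrs.coll17_<epoch>_<n>), then dropping its tmp.mr.* scratch collections. The map and reduce bodies are truncated in the log ("this.has...", "values...."), so the bodies below are hypothetical stand-ins; only the command shape, query, sort, and out target are copied from the logged command.

    // Hedged reconstruction of the logged first-pass command (mongo shell JS).
    // mongos normally adds shardedFirstPass itself when routing to the shards;
    // it appears here only to mirror the log line.
    db.runCommand({
        mapreduce: "coll17",
        map: function mapper() {
            // stand-in body: the log truncates the real one after "this.has..."
            if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
                emit(this.key, this.value);
            }
        },
        reduce: function reducer(key, values) {
            var res = {};
            // stand-in body: the log truncates the real one after "values...."
            values.forEach(function (v) { res.last = v; });
            return res;
        },
        query: { key: { $exists: true }, value: { $exists: true } },
        sort: { _id: -1.0 },
        out: "tmp.mrs.coll17_1436464595_18",  // temporary collection named in the log
        shardedFirstPass: true
    });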
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.955-0400 m31100| 2015-07-09T13:56:36.952-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.956-0400 m31200| 2015-07-09T13:56:36.955-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.960-0400 m31100| 2015-07-09T13:56:36.960-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_92 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.962-0400 m31100| 2015-07-09T13:56:36.960-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_92 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.962-0400 m31100| 2015-07-09T13:56:36.962-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_92 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.964-0400 m31100| 2015-07-09T13:56:36.963-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464595_19 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.964-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.965-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.965-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.966-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_19", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:29 reslen:212 locks:{ Global: { acquireCount: { r: 211, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 11464, W: 1066 } }, Database: { acquireCount: { r: 26, w: 66, R: 41, W: 11 }, acquireWaitCount: { r: 5, w: 8, R: 12, W: 8 }, timeAcquiringMicros: { r: 5704, w: 73384, R: 70266, W: 79793 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1136ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.967-0400 m31200| 2015-07-09T13:56:36.966-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.974-0400 m31100| 2015-07-09T13:56:36.974-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.986-0400 m31100| 2015-07-09T13:56:36.982-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_93 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.987-0400 m31100| 2015-07-09T13:56:36.983-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_93 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.988-0400 m31100| 2015-07-09T13:56:36.987-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_93 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.988-0400 m31200| 2015-07-09T13:56:36.988-0400 I SHARDING [conn41] ChunkManager: time to load chunks for db17.coll17: 0ms sequenceNumber: 2 version: 2|5||559eb5d2ca4787b9985d1c39 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.990-0400 m31100| 2015-07-09T13:56:36.989-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62957 #76 (70 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.990-0400 m31100| 2015-07-09T13:56:36.990-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464595_28 command: mapReduce 
{ mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.991-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.991-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.991-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.992-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_28", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:25 reslen:212 locks:{ Global: { acquireCount: { r: 203, w: 74, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 20583, W: 218 } }, Database: { acquireCount: { r: 26, w: 66, R: 37, W: 11 }, acquireWaitCount: { r: 4, w: 12, R: 11, W: 8 }, timeAcquiringMicros: { r: 17721, w: 81300, R: 24060, W: 73934 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1139ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.992-0400 m31100| 2015-07-09T13:56:36.992-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.992-0400 m31200| 2015-07-09T13:56:36.992-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62958 #74 (68 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.993-0400 m31200| 2015-07-09T13:56:36.992-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.995-0400 m31100| 2015-07-09T13:56:36.995-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_94 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.995-0400 m31100| 2015-07-09T13:56:36.995-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_94 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.996-0400 m31100| 2015-07-09T13:56:36.996-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_94 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.997-0400 m31100| 2015-07-09T13:56:36.996-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464595_20 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.997-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.998-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.998-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.999-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464595_20", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:25 reslen:212 locks:{ Global: { acquireCount: { r: 203, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 9289, w: 20250 } }, Database: { acquireCount: { r: 26, w: 66, R: 37, W: 11 }, acquireWaitCount: { r: 4, w: 14, R: 9, W: 5 }, timeAcquiringMicros: { r: 26261, w: 76810, R: 52144, W: 62035 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1145ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:36.999-0400 
m31200| 2015-07-09T13:56:36.998-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.009-0400 m31100| 2015-07-09T13:56:37.007-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62959 #77 (71 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.011-0400 m31200| 2015-07-09T13:56:37.010-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62960 #75 (69 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.028-0400 m31100| 2015-07-09T13:56:37.027-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62961 #78 (72 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.030-0400 m31200| 2015-07-09T13:56:37.030-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62962 #76 (70 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.059-0400 m31100| 2015-07-09T13:56:37.058-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62963 #79 (73 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.062-0400 m31200| 2015-07-09T13:56:37.062-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62964 #77 (71 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.087-0400 m31100| 2015-07-09T13:56:37.087-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62965 #80 (74 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.092-0400 m31200| 2015-07-09T13:56:37.091-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62966 #78 (72 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.163-0400 m31200| 2015-07-09T13:56:37.163-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.172-0400 m31200| 2015-07-09T13:56:37.171-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.173-0400 m31200| 2015-07-09T13:56:37.172-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.173-0400 m31200| 2015-07-09T13:56:37.173-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_52 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.175-0400 m31200| 2015-07-09T13:56:37.174-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.175-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.175-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.175-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.176-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.176-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.178-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464595_19", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464595_19", timeMillis: 1133, 
counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|66, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464595_19", timeMillis: 610, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|65, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 963 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 4 }, timeAcquiringMicros: { w: 47558, W: 17406 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 208ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.179-0400 m31200| 2015-07-09T13:56:37.174-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.179-0400 m31100| 2015-07-09T13:56:37.174-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.179-0400 m31201| 2015-07-09T13:56:37.175-0400 I COMMAND [repl writer worker 8] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.179-0400 m31202| 2015-07-09T13:56:37.177-0400 I COMMAND [repl writer worker 11] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.179-0400 m31200| 2015-07-09T13:56:37.177-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.180-0400 m31101| 2015-07-09T13:56:37.178-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.180-0400 m31102| 2015-07-09T13:56:37.180-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.187-0400 m31200| 2015-07-09T13:56:37.186-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.187-0400 m31200| 2015-07-09T13:56:37.186-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.188-0400 m31200| 2015-07-09T13:56:37.186-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.195-0400 m31200| 2015-07-09T13:56:37.194-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:56:37.191-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.197-0400 m31200| 2015-07-09T13:56:37.197-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.197-0400 m31200| 2015-07-09T13:56:37.197-0400 I 
COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.198-0400 m31200| 2015-07-09T13:56:37.197-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.204-0400 m31200| 2015-07-09T13:56:37.204-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.205-0400 m31200| 2015-07-09T13:56:37.204-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.206-0400 m31200| 2015-07-09T13:56:37.206-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.206-0400 m31200| 2015-07-09T13:56:37.206-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.206-0400 m31200| 2015-07-09T13:56:37.206-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_51 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.208-0400 m31201| 2015-07-09T13:56:37.207-0400 I COMMAND [repl writer worker 12] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.208-0400 m31202| 2015-07-09T13:56:37.207-0400 I COMMAND [repl writer worker 10] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.209-0400 m31200| 2015-07-09T13:56:37.208-0400 I COMMAND [conn38] command db17.map_reduce_replace4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.209-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.209-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.209-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.210-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.210-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.211-0400 m31200| }, out: { replace: "map_reduce_replace4" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464595_20", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464595_20", timeMillis: 1144, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|108, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464595_20", timeMillis: 625, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|108, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, 
timeAcquiringMicros: { w: 27387, W: 180 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 4 }, timeAcquiringMicros: { w: 18657, W: 13708 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 210ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.211-0400 m31100| 2015-07-09T13:56:37.209-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.212-0400 m31200| 2015-07-09T13:56:37.208-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.212-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.212-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.212-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.212-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.212-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.214-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464595_18", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464595_18", timeMillis: 1128, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|62, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464595_18", timeMillis: 601, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|32, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 9462, W: 23143 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 4, W: 3 }, timeAcquiringMicros: { w: 47501, W: 29476 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 253ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.214-0400 m31200| 2015-07-09T13:56:37.209-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.214-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.215-0400 m31200| var res = {}; 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.215-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.215-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.215-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.217-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464595_27", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464595_27", timeMillis: 1115, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|56, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464595_27", timeMillis: 604, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|82, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 15806, W: 12646 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 63805, W: 1870 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 266ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.217-0400 m31100| 2015-07-09T13:56:37.210-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464595_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.218-0400 m31100| 2015-07-09T13:56:37.210-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.218-0400 m31200| 2015-07-09T13:56:37.211-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.218-0400 m31200| 2015-07-09T13:56:37.213-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.218-0400 m31201| 2015-07-09T13:56:37.215-0400 I COMMAND [repl writer worker 11] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.218-0400 m31102| 2015-07-09T13:56:37.215-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.219-0400 m31101| 2015-07-09T13:56:37.215-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.220-0400 m31202| 2015-07-09T13:56:37.220-0400 I COMMAND [repl writer worker 12] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.223-0400 m31200| 2015-07-09T13:56:37.222-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464595_27 
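The mapreduce.shardedfinish entries above are the merge phase: mongos designates one shard (m31200 here) to combine the per-shard tmp.mrs.* results into the final output collection (out: { replace: "map_reduce_replaceN" }), after which the temporaries are dropped on both shards and the drops are replayed on the secondaries (m31101/m31102, m31201/m31202) by the repl writer workers. The shardCounts it reports can be checked directly against the top-level totals; a quick tally in the same shell JS, with the values copied from the log:

    // Per-shard counts from the shardedfinish entry should sum to the
    // top-level totals it reports: { emit: 2000, input: 2000, output: 40, reduce: 160 }.
    var shardCounts = {
        "test-rs0": { input: 1019, emit: 1019, reduce: 80, output: 20 },
        "test-rs1": { input: 981,  emit: 981,  reduce: 80, output: 20 }
    };
    var totals = { emit: 0, input: 0, output: 0, reduce: 0 };
    Object.keys(shardCounts).forEach(function (s) {
        Object.keys(totals).forEach(function (k) { totals[k] += shardCounts[s][k]; });
    });
    printjson(totals);  // matches the logged counts exactly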
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.224-0400 m31200| 2015-07-09T13:56:37.223-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.224-0400 m31102| 2015-07-09T13:56:37.223-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464595_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.225-0400 m31200| 2015-07-09T13:56:37.224-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.226-0400 m31101| 2015-07-09T13:56:37.225-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464595_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.230-0400 m31201| 2015-07-09T13:56:37.229-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.231-0400 m31200| 2015-07-09T13:56:37.230-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.232-0400 m31200| 2015-07-09T13:56:37.231-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_53 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.232-0400 m31100| 2015-07-09T13:56:37.231-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_95 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.233-0400 m31200| 2015-07-09T13:56:37.233-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.235-0400 m31202| 2015-07-09T13:56:37.234-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.236-0400 m31200| 2015-07-09T13:56:37.234-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.236-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.237-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.237-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.237-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.237-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.240-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464595_28", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464595_28", timeMillis: 1132, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|104, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464595_28", timeMillis: 617, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464596000|91, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, 
test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 39440 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 5, W: 4 }, timeAcquiringMicros: { w: 49854, W: 26553 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 241ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.240-0400 m31100| 2015-07-09T13:56:37.235-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.240-0400 m31200| 2015-07-09T13:56:37.235-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.250-0400 m31201| 2015-07-09T13:56:37.248-0400 I COMMAND [repl writer worker 5] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.252-0400 m31202| 2015-07-09T13:56:37.252-0400 I COMMAND [repl writer worker 5] CMD: drop db17.tmp.mrs.coll17_1436464595_19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.258-0400 m31101| 2015-07-09T13:56:37.258-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.259-0400 m31102| 2015-07-09T13:56:37.258-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.260-0400 m31200| 2015-07-09T13:56:37.259-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.261-0400 m31201| 2015-07-09T13:56:37.261-0400 I COMMAND [repl writer worker 2] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.262-0400 m31200| 2015-07-09T13:56:37.262-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.262-0400 m31100| 2015-07-09T13:56:37.262-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_97 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.263-0400 m31100| 2015-07-09T13:56:37.262-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_96 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.263-0400 m31100| 2015-07-09T13:56:37.263-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_98 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.264-0400 m31202| 2015-07-09T13:56:37.264-0400 I COMMAND [repl writer worker 4] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.273-0400 m31201| 2015-07-09T13:56:37.272-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.279-0400 m31201| 2015-07-09T13:56:37.278-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464595_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.280-0400 m31202| 2015-07-09T13:56:37.280-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464595_20 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.283-0400 m31202| 2015-07-09T13:56:37.283-0400 I COMMAND [repl writer worker 7] CMD: drop 
db17.tmp.mrs.coll17_1436464595_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.283-0400 m31201| 2015-07-09T13:56:37.283-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.284-0400 m31202| 2015-07-09T13:56:37.284-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464595_18 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.285-0400 m31102| 2015-07-09T13:56:37.285-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.287-0400 m31101| 2015-07-09T13:56:37.287-0400 I COMMAND [repl writer worker 3] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.331-0400 m31200| 2015-07-09T13:56:37.331-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.333-0400 m31201| 2015-07-09T13:56:37.332-0400 I COMMAND [repl writer worker 3] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.334-0400 m31202| 2015-07-09T13:56:37.334-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464595_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.341-0400 m31100| 2015-07-09T13:56:37.341-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_99 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.374-0400 m31200| 2015-07-09T13:56:37.374-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.841-0400 m31200| 2015-07-09T13:56:37.841-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464597_21 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.846-0400 m31200| 2015-07-09T13:56:37.846-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.846-0400 m31200| 2015-07-09T13:56:37.846-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.856-0400 m31200| 2015-07-09T13:56:37.856-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.863-0400 m31200| 2015-07-09T13:56:37.862-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464597_21 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.863-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.863-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.863-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.864-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_21", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 11408, W: 91 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 15, R: 12, W: 8 }, timeAcquiringMicros: { w: 174279, R: 175386, W: 17753 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 650ms 
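The locks section of these slow-command entries is where contention from the concurrent FSM workers shows up. Taking the 650ms entry directly above as a worked example, the Database-lock waits alone appear to account for over half the runtime; a rough tally (waits within one operation are sequential, so summing the per-mode micros is a reasonable approximation), with timeAcquiringMicros copied from the log:

    // Database lock wait micros from the 650ms entry above (modes w/R/W).
    var t = { w: 174279, R: 175386, W: 17753 };
    var waitedMs = (t.w + t.R + t.W) / 1000;
    print(waitedMs.toFixed(1) + " ms waited on Database locks out of 650 ms total");
    // prints "367.4 ms ...": most of this command's latency is lock waiting,
    // not map/reduce execution itself.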
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.870-0400 m31200| 2015-07-09T13:56:37.870-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464597_22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.881-0400 m31200| 2015-07-09T13:56:37.880-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.881-0400 m31200| 2015-07-09T13:56:37.881-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.883-0400 m31200| 2015-07-09T13:56:37.883-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_56 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.885-0400 m31200| 2015-07-09T13:56:37.885-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464597_22 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.886-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.886-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.886-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.887-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_22", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:212 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4939, W: 420 } }, Database: { acquireCount: { r: 25, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 3, w: 13, R: 13, W: 9 }, timeAcquiringMicros: { r: 8962, w: 111084, R: 130058, W: 72467 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 651ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.895-0400 m31200| 2015-07-09T13:56:37.895-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464597_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.905-0400 m31200| 2015-07-09T13:56:37.905-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.906-0400 m31200| 2015-07-09T13:56:37.905-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.909-0400 m31200| 2015-07-09T13:56:37.908-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.910-0400 m31200| 2015-07-09T13:56:37.909-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464597_29 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.911-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.911-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.911-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.912-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_29", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:212 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, 
acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 10932, W: 501 } }, Database: { acquireCount: { r: 25, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 2, w: 10, R: 16, W: 9 }, timeAcquiringMicros: { r: 16017, w: 59905, R: 99674, W: 130687 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 670ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.917-0400 m31200| 2015-07-09T13:56:37.916-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464597_23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.926-0400 m31200| 2015-07-09T13:56:37.926-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.927-0400 m31200| 2015-07-09T13:56:37.926-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.928-0400 m31200| 2015-07-09T13:56:37.927-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_58 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.929-0400 m31200| 2015-07-09T13:56:37.928-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464597_23 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.930-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.930-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.930-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.931-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_23", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 10571, w: 10919, W: 785 } }, Database: { acquireCount: { r: 25, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 4, w: 8, R: 16, W: 9 }, timeAcquiringMicros: { r: 62654, w: 112315, R: 63110, W: 45875 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 669ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.932-0400 m31200| 2015-07-09T13:56:37.932-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464597_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.938-0400 m31200| 2015-07-09T13:56:37.937-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.939-0400 m31200| 2015-07-09T13:56:37.939-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.940-0400 m31200| 2015-07-09T13:56:37.939-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_59 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.941-0400 m31200| 2015-07-09T13:56:37.941-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464597_30 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.941-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.942-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:37.942-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:37.942-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_30", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:212 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 20940, w: 9753 } }, Database: { acquireCount: { r: 25, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 5, w: 11, R: 12, W: 5 }, timeAcquiringMicros: { r: 6380, w: 53069, R: 67211, W: 132189 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 603ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.435-0400 m31100| 2015-07-09T13:56:38.434-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464597_21 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.440-0400 m31100| 2015-07-09T13:56:38.439-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_95 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.440-0400 m31100| 2015-07-09T13:56:38.439-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_95 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.440-0400 m31100| 2015-07-09T13:56:38.440-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_95 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.442-0400 m31100| 2015-07-09T13:56:38.442-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464597_21 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.443-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.443-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.443-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.444-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_21", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:212 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 309 } }, Database: { acquireCount: { r: 25, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 1, w: 27, R: 9, W: 4 }, timeAcquiringMicros: { r: 18060, w: 414543, R: 188173, W: 3416 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1229ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.445-0400 m31200| 2015-07-09T13:56:38.444-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_60 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.449-0400 m31100| 2015-07-09T13:56:38.448-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464597_22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.456-0400 m31100| 2015-07-09T13:56:38.456-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_96 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.457-0400 m31100| 2015-07-09T13:56:38.456-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_96 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.459-0400 m31100| 2015-07-09T13:56:38.458-0400 I COMMAND [conn72] CMD: drop 
db17.tmp.mr.coll17_96
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.461-0400 m31100| 2015-07-09T13:56:38.460-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464597_22 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.461-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.461-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.461-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.463-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_22", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:25 reslen:212 locks:{ Global: { acquireCount: { r: 201, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5616, W: 386 } }, Database: { acquireCount: { r: 25, w: 66, R: 37, W: 11 }, acquireWaitCount: { r: 4, w: 10, R: 18, W: 9 }, timeAcquiringMicros: { r: 23177, w: 58019, R: 131190, W: 74359 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1225ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.463-0400 m31100| 2015-07-09T13:56:38.462-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464597_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.463-0400 m31200| 2015-07-09T13:56:38.462-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.469-0400 m31100| 2015-07-09T13:56:38.468-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_97
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.469-0400 m31100| 2015-07-09T13:56:38.468-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_97
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.470-0400 m31100| 2015-07-09T13:56:38.469-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_97
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.471-0400 m31100| 2015-07-09T13:56:38.470-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464597_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.471-0400 m31100| 2015-07-09T13:56:38.470-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464597_29 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.472-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.472-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.472-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.474-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_29", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:29 reslen:212 locks:{ Global: { acquireCount: { r: 209, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7827, w: 5737, W: 1138 } }, Database: { acquireCount: { r: 25, w: 66, R: 41, W: 11 }, acquireWaitCount: { r: 4, w: 11, R: 19, W: 7 }, timeAcquiringMicros: { r: 18240, w: 51479, R: 105436, W: 58347 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1230ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.475-0400 m31200| 2015-07-09T13:56:38.471-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_62
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.485-0400 m31100| 2015-07-09T13:56:38.485-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_98
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.486-0400 m31100| 2015-07-09T13:56:38.485-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_98
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.487-0400 m31100| 2015-07-09T13:56:38.486-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_98
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.488-0400 m31100| 2015-07-09T13:56:38.488-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464597_23 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.489-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.489-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.489-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.490-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_23", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:26 reslen:212 locks:{ Global: { acquireCount: { r: 203, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 13492, w: 8641, W: 161 } }, Database: { acquireCount: { r: 25, w: 66, R: 38, W: 11 }, acquireWaitCount: { r: 8, w: 8, R: 23, W: 7 }, timeAcquiringMicros: { r: 6409, w: 103242, R: 76012, W: 57558 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1229ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.491-0400 m31200| 2015-07-09T13:56:38.489-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_63
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.492-0400 m31100| 2015-07-09T13:56:38.491-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464597_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.516-0400 m31100| 2015-07-09T13:56:38.515-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.516-0400 m31100| 2015-07-09T13:56:38.515-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.517-0400 m31100| 2015-07-09T13:56:38.516-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.518-0400 m31100| 2015-07-09T13:56:38.517-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464597_30 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.518-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.518-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.518-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.519-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464597_30", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:26 reslen:212 locks:{ Global: { acquireCount: { r: 203, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 7157, w: 28577 } }, Database: { acquireCount: { r: 25, w: 66, R: 38, W: 11 }, acquireWaitCount: { r: 8, w: 12, R: 16, W: 5 }, timeAcquiringMicros: { r: 4472, w: 81653, R: 29043, W: 100655 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1179ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.519-0400 m31200| 2015-07-09T13:56:38.518-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_64
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.681-0400 m31200| 2015-07-09T13:56:38.681-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.689-0400 m31200| 2015-07-09T13:56:38.688-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_64
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.689-0400 m31200| 2015-07-09T13:56:38.689-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_64
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.690-0400 m31201| 2015-07-09T13:56:38.689-0400 I COMMAND [repl writer worker 1] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.692-0400 m31200| 2015-07-09T13:56:38.690-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_64
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.693-0400 m31202| 2015-07-09T13:56:38.690-0400 I COMMAND [repl writer worker 11] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.693-0400 m31200| 2015-07-09T13:56:38.690-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.699-0400 m31200| 2015-07-09T13:56:38.698-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.700-0400 m31200| 2015-07-09T13:56:38.699-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.700-0400 m31200| 2015-07-09T13:56:38.699-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.700-0400 m31200| 2015-07-09T13:56:38.700-0400 I COMMAND [conn38] command db17.map_reduce_replace4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.701-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.701-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.704-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.705-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.705-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.707-0400 m31200| }, out: { replace: "map_reduce_replace4" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464597_22", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464597_22", timeMillis: 1221, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464598000|74, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464597_22", timeMillis: 646, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464597000|166, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7205, W: 1323 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 82590, W: 407 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 237ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.707-0400 m31100| 2015-07-09T13:56:38.700-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464597_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.708-0400 m31200| 2015-07-09T13:56:38.701-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.708-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.708-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.708-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.709-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.709-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.711-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464597_30", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464597_30", timeMillis: 1178, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464598000|105, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464597_30", timeMillis: 601, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464597000|229, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 10134, W: 33 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 1, W: 4 }, timeAcquiringMicros: { w: 27660, W: 2945 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 183ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.711-0400 m31200| 2015-07-09T13:56:38.702-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.712-0400 m31100| 2015-07-09T13:56:38.702-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464597_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.712-0400 m31200| 2015-07-09T13:56:38.703-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464597_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.712-0400 m31102| 2015-07-09T13:56:38.706-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464597_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.712-0400 m31200| 2015-07-09T13:56:38.706-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464597_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.712-0400 m31101| 2015-07-09T13:56:38.707-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464597_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.713-0400 m31201| 2015-07-09T13:56:38.707-0400 I COMMAND [repl writer worker 7] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.713-0400 m31101| 2015-07-09T13:56:38.710-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464597_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.713-0400 m31102| 2015-07-09T13:56:38.710-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464597_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.713-0400 m31202| 2015-07-09T13:56:38.710-0400 I COMMAND [repl writer worker 8] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.714-0400 m31200| 2015-07-09T13:56:38.712-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_62
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.714-0400 m31200| 2015-07-09T13:56:38.712-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_62
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.714-0400 m31200| 2015-07-09T13:56:38.712-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.727-0400 m31200| 2015-07-09T13:56:38.726-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_60
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.728-0400 m31200| 2015-07-09T13:56:38.726-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_60
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.728-0400 m31200| 2015-07-09T13:56:38.727-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_62
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.729-0400 m31202| 2015-07-09T13:56:38.729-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.731-0400 m31201| 2015-07-09T13:56:38.731-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.737-0400 m31202| 2015-07-09T13:56:38.736-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.740-0400 m31200| 2015-07-09T13:56:38.740-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_60
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.742-0400 m31200| 2015-07-09T13:56:38.740-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.742-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.743-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.743-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.744-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.744-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.750-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464597_29", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464597_29", timeMillis: 1228, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464598000|86, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464597_29", timeMillis: 666, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464597000|199, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 31313, W: 314 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 3, W: 4 }, timeAcquiringMicros: { w: 55375, W: 64973 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 268ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.750-0400 m31201| 2015-07-09T13:56:38.741-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.750-0400 m31100| 2015-07-09T13:56:38.741-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464597_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.751-0400 m31200| 2015-07-09T13:56:38.741-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.751-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.751-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.751-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.752-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.752-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.753-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464597_21", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464597_21", timeMillis: 1227, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464598000|50, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464597_21", timeMillis: 633, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464597000|148, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 17172, W: 10799 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 3, W: 2 }, timeAcquiringMicros: { w: 126616, W: 12989 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 296ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.753-0400 m31200| 2015-07-09T13:56:38.741-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.754-0400 m31100| 2015-07-09T13:56:38.742-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464597_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.754-0400 m31102| 2015-07-09T13:56:38.745-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464597_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.754-0400 m31200| 2015-07-09T13:56:38.745-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464597_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.754-0400 m31101| 2015-07-09T13:56:38.746-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464597_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.755-0400 m31100| 2015-07-09T13:56:38.748-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.755-0400 m31200| 2015-07-09T13:56:38.749-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464597_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.755-0400 m31102| 2015-07-09T13:56:38.750-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464597_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.756-0400 m31202| 2015-07-09T13:56:38.750-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464597_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.756-0400 m31200| 2015-07-09T13:56:38.754-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_63
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.756-0400 m31200| 2015-07-09T13:56:38.754-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_63
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.759-0400 m31201| 2015-07-09T13:56:38.758-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464597_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.763-0400 m31200| 2015-07-09T13:56:38.762-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_63
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.765-0400 m31202| 2015-07-09T13:56:38.765-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464597_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.766-0400 m31200| 2015-07-09T13:56:38.766-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_65
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.767-0400 m31200| 2015-07-09T13:56:38.766-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.767-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.767-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.767-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.768-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.768-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.770-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464597_23", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464597_23", timeMillis: 1226, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464598000|89, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464597_23", timeMillis: 667, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464597000|224, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 35977, W: 1141 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 6, W: 4 }, timeAcquiringMicros: { w: 37796, W: 68421 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 277ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.770-0400 m31201| 2015-07-09T13:56:38.767-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464597_22
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.771-0400 m31101| 2015-07-09T13:56:38.767-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464597_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.771-0400 m31100| 2015-07-09T13:56:38.767-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464597_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.772-0400 m31100| 2015-07-09T13:56:38.772-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.772-0400 m31200| 2015-07-09T13:56:38.771-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464597_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.773-0400 m31100| 2015-07-09T13:56:38.773-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.774-0400 m31102| 2015-07-09T13:56:38.773-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464597_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.775-0400 m31202| 2015-07-09T13:56:38.774-0400 I COMMAND [repl writer worker 9] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.775-0400 m31201| 2015-07-09T13:56:38.775-0400 I COMMAND [repl writer worker 11] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.786-0400 m31202| 2015-07-09T13:56:38.785-0400 I COMMAND [repl writer worker 8] CMD: drop db17.tmp.mrs.coll17_1436464597_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.794-0400 m31101| 2015-07-09T13:56:38.792-0400 I COMMAND [repl writer worker 8] CMD: drop db17.tmp.mrs.coll17_1436464597_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.795-0400 m31202| 2015-07-09T13:56:38.795-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464597_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.799-0400 m31200| 2015-07-09T13:56:38.798-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_68
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.799-0400 m31200| 2015-07-09T13:56:38.798-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_67
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.801-0400 m31200| 2015-07-09T13:56:38.799-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_66
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.802-0400 m31202| 2015-07-09T13:56:38.800-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464597_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.803-0400 m31201| 2015-07-09T13:56:38.803-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464597_21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.818-0400 m31201| 2015-07-09T13:56:38.818-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464597_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.836-0400 m31201| 2015-07-09T13:56:38.835-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464597_23
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.842-0400 m31100| 2015-07-09T13:56:38.841-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.848-0400 m31100| 2015-07-09T13:56:38.848-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:38.852-0400 m31200| 2015-07-09T13:56:38.852-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.547-0400 m31200| 2015-07-09T13:56:39.546-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464598_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.553-0400 m31200| 2015-07-09T13:56:39.553-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_65
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.553-0400 m31200| 2015-07-09T13:56:39.553-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_65
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.554-0400 m31200| 2015-07-09T13:56:39.554-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_65
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.558-0400 m31200| 2015-07-09T13:56:39.558-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464598_31 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.559-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.559-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.559-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.560-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_31", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:212 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 11635, W: 1009 } }, Database: { acquireCount: { r: 25, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 1, w: 20, R: 10, W: 3 }, timeAcquiringMicros: { r: 11133, w: 251065, R: 131593, W: 3954 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 814ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.574-0400 m31200| 2015-07-09T13:56:39.574-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464598_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.582-0400 m31200| 2015-07-09T13:56:39.582-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_66
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.583-0400 m31200| 2015-07-09T13:56:39.583-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_66
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.584-0400 m31200| 2015-07-09T13:56:39.584-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464598_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.592-0400 m31200| 2015-07-09T13:56:39.591-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_68
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.592-0400 m31200| 2015-07-09T13:56:39.592-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_68
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.592-0400 m31100| 2015-07-09T13:56:39.592-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464598_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.593-0400 m31200| 2015-07-09T13:56:39.592-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_68
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.596-0400 m31200| 2015-07-09T13:56:39.595-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_66
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.597-0400 m31200| 2015-07-09T13:56:39.597-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464598_32 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.597-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.597-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.597-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.598-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_32", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:212 locks:{ Global: { acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 15987, W: 1002 } }, Database: { acquireCount: { r: 25, w: 66, R: 22, W: 11 }, acquireWaitCount: { r: 3, w: 6, R: 19, W: 7 }, timeAcquiringMicros: { r: 3818, w: 39804, R: 125123, W: 91612 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 802ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.599-0400 m31200| 2015-07-09T13:56:39.599-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464598_25 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.599-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.600-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.600-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.601-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_25", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:212 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 17214, W: 705 } }, Database: { acquireCount: { r: 25, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 2, w: 10, R: 15, W: 9 }, timeAcquiringMicros: { r: 24929, w: 62169, R: 73453, W: 174334 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 830ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.602-0400 m31200| 2015-07-09T13:56:39.602-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464598_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.607-0400 m31100| 2015-07-09T13:56:39.606-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.608-0400 m31100| 2015-07-09T13:56:39.607-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.610-0400 m31200| 2015-07-09T13:56:39.610-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_67
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.610-0400 m31200| 2015-07-09T13:56:39.610-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_67
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.612-0400 m31100| 2015-07-09T13:56:39.611-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.612-0400 m31200| 2015-07-09T13:56:39.612-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_67
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.616-0400 m31200| 2015-07-09T13:56:39.616-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464598_24 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.616-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.616-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.616-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.617-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_24", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:212 locks:{ Global: { acquireCount: { r: 169, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 18858, w: 9951, W: 37 } }, Database: { acquireCount: { r: 25, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 5, w: 10, R: 16, W: 7 }, timeAcquiringMicros: { r: 27974, w: 75261, R: 124944, W: 86556 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 847ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.619-0400 m31100| 2015-07-09T13:56:39.617-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464598_31 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.620-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.620-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.620-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.621-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_31", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1485 } }, Database: { acquireCount: { r: 25, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 1, w: 16, R: 10, W: 6 }, timeAcquiringMicros: { r: 5042, w: 194428, R: 158206, W: 8975 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 874ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.621-0400 m31200| 2015-07-09T13:56:39.618-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_70
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.627-0400 m31100| 2015-07-09T13:56:39.626-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464598_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.638-0400 m31100| 2015-07-09T13:56:39.638-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.639-0400 m31100| 2015-07-09T13:56:39.638-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.640-0400 m31100| 2015-07-09T13:56:39.640-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.643-0400 m31100| 2015-07-09T13:56:39.642-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464598_25 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.643-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.643-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.643-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.645-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_25", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:212 locks:{ Global: { acquireCount: { r: 181, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 15732, W: 519 } }, Database: { acquireCount: { r: 25, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 4, w: 9, R: 14, W: 7 }, timeAcquiringMicros: { r: 5251, w: 90932, R: 110249, W: 47186 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 873ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.645-0400 m31200| 2015-07-09T13:56:39.643-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_71
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.645-0400 m31200| 2015-07-09T13:56:39.645-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464598_26
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.645-0400 m31100| 2015-07-09T13:56:39.645-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464598_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.651-0400 m31200| 2015-07-09T13:56:39.651-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.651-0400 m31200| 2015-07-09T13:56:39.651-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.653-0400 m31200| 2015-07-09T13:56:39.652-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.654-0400 m31100| 2015-07-09T13:56:39.654-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.656-0400 m31100| 2015-07-09T13:56:39.654-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.657-0400 m31100| 2015-07-09T13:56:39.656-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.658-0400 m31100| 2015-07-09T13:56:39.657-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464598_24 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.658-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.658-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.658-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.659-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_24", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:212 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 29094, W: 468 } }, Database: { acquireCount: { r: 25, w: 66, R: 24, W: 11 }, acquireWaitCount: { r: 7, w: 7, R: 11, W: 8 }, timeAcquiringMicros: { r: 12101, w: 32077, R: 107547, W: 130521 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 889ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.660-0400 m31200| 2015-07-09T13:56:39.659-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_72
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.660-0400 m31200| 2015-07-09T13:56:39.659-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464598_26 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.660-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.660-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.661-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.662-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_26", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:212 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 9184, w: 24999, W: 25333 } }, Database: { acquireCount: { r: 25, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 6, w: 10, R: 15, W: 8 }, timeAcquiringMicros: { r: 16073, w: 74267, R: 37623, W: 145574 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 823ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.662-0400 m31100| 2015-07-09T13:56:39.660-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464598_26
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.675-0400 m31100| 2015-07-09T13:56:39.674-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.675-0400 m31100| 2015-07-09T13:56:39.674-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.677-0400 m31100| 2015-07-09T13:56:39.677-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464598_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.678-0400 m31100| 2015-07-09T13:56:39.677-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.693-0400 m31100| 2015-07-09T13:56:39.692-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.693-0400 m31100| 2015-07-09T13:56:39.692-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.694-0400 m31100| 2015-07-09T13:56:39.693-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.698-0400 m31100| 2015-07-09T13:56:39.697-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464598_26 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.699-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.699-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.699-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.700-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_26", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:212 locks:{ Global: { acquireCount: { r: 177, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 40331, w: 15085, W: 49 } }, Database: { acquireCount: { r: 25, w: 66, R: 25, W: 11 }, acquireWaitCount: { r: 8, w: 8, R: 12, W: 8 }, timeAcquiringMicros: { r: 12352, w: 35331, R: 32544, W: 146991 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 861ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.701-0400 m31100| 2015-07-09T13:56:39.697-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464598_32 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.701-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.701-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.701-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.702-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464598_32", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:212 locks:{ Global: { acquireCount: { r: 177, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 26570, w: 25808, W: 702 } }, Database: { acquireCount: { r: 25, w: 66, R: 25, W: 11 }, acquireWaitCount: { r: 8, w: 11, R: 12, W: 6 }, timeAcquiringMicros: { r: 9187, w: 101797, R: 109016, W: 49930 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 902ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.702-0400 m31200| 2015-07-09T13:56:39.699-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.703-0400 m31200| 2015-07-09T13:56:39.699-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.892-0400 m31200| 2015-07-09T13:56:39.892-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.898-0400 m31200| 2015-07-09T13:56:39.897-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_72
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.898-0400 m31200| 2015-07-09T13:56:39.898-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_72
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.899-0400 m31200| 2015-07-09T13:56:39.898-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_72
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.900-0400 m31201| 2015-07-09T13:56:39.899-0400 I COMMAND [repl writer worker 1] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.901-0400 m31200| 2015-07-09T13:56:39.899-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.902-0400 m31202| 2015-07-09T13:56:39.901-0400 I COMMAND [repl writer worker 7] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.913-0400 m31200| 2015-07-09T13:56:39.913-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_70
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.914-0400 m31200| 2015-07-09T13:56:39.913-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_70
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.914-0400 m31200| 2015-07-09T13:56:39.913-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.923-0400 m31200| 2015-07-09T13:56:39.922-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.923-0400 m31200| 2015-07-09T13:56:39.922-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.924-0400 m31200| 2015-07-09T13:56:39.922-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.929-0400 m31200| 2015-07-09T13:56:39.928-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_71
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.930-0400 m31200| 2015-07-09T13:56:39.928-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_71
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.931-0400 m31200| 2015-07-09T13:56:39.929-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_71
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.931-0400 m31200| 2015-07-09T13:56:39.929-0400 I COMMAND [conn38] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.932-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.932-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.933-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.933-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.933-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.936-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464598_24", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464598_24", timeMillis: 885, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|99, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464598_24", timeMillis: 841, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|100, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 29843 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 3 }, timeAcquiringMicros: { w: 19097, W: 73335 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 270ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.936-0400 m31200| 2015-07-09T13:56:39.929-0400 I COMMAND [conn29] command db17.map_reduce_replace4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.937-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.937-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.937-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.938-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.939-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.941-0400 m31200| }, out: { replace: "map_reduce_replace4" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464598_25", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464598_25", timeMillis: 870, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|86, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464598_25", timeMillis: 814, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|78, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 12509, W: 24727 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 4, W: 2 }, timeAcquiringMicros: { w: 57779, W: 2051 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 285ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.941-0400 m31100| 2015-07-09T13:56:39.931-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464598_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.941-0400 m31100| 2015-07-09T13:56:39.931-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464598_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.941-0400 m31200| 2015-07-09T13:56:39.930-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_73
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.942-0400 m31200| 2015-07-09T13:56:39.930-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_70
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.944-0400 m31200| 2015-07-09T13:56:39.930-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.944-0400 m31201| 2015-07-09T13:56:39.932-0400 I COMMAND [repl writer worker 12] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.944-0400 m31202| 2015-07-09T13:56:39.932-0400 I COMMAND [repl writer worker 14] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.944-0400 m31200| 2015-07-09T13:56:39.934-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464598_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.944-0400 m31102| 2015-07-09T13:56:39.935-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464598_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.945-0400 m31200| 2015-07-09T13:56:39.936-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464598_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.945-0400 m31101| 2015-07-09T13:56:39.936-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464598_24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.946-0400 m31101| 2015-07-09T13:56:39.938-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464598_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.946-0400 m31202| 2015-07-09T13:56:39.941-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.946-0400 m31200| 2015-07-09T13:56:39.942-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.947-0400 m31200| 2015-07-09T13:56:39.942-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.947-0400 m31200| 2015-07-09T13:56:39.942-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_74
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.947-0400 m31200| 2015-07-09T13:56:39.942-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.948-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.948-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.948-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.948-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.948-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.951-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464598_31", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464598_31", timeMillis: 863, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|35, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464598_31", timeMillis: 810, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|25, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 30153, W: 217 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 5, W: 3 }, timeAcquiringMicros: { w: 106019, W: 1129 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 324ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.952-0400 m31200| 2015-07-09T13:56:39.943-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.953-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.954-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.954-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.954-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.955-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.957-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464598_26", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464598_26", timeMillis: 838, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|105, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464598_26", timeMillis: 815, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|108, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 20139, W: 13961 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 1, W: 3 }, timeAcquiringMicros: { w: 874, W: 14136 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 244ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.957-0400 m31100| 2015-07-09T13:56:39.943-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464598_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.958-0400 m31100| 2015-07-09T13:56:39.944-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464598_26
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.958-0400 m31200| 2015-07-09T13:56:39.949-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464598_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.958-0400 m31101| 2015-07-09T13:56:39.949-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464598_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.958-0400 m31102| 2015-07-09T13:56:39.950-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464598_25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.959-0400 m31200| 2015-07-09T13:56:39.950-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.960-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.960-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.960-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.963-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.963-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.965-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464598_32", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464598_32", timeMillis: 898, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|107, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464598_32", timeMillis: 797, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464599000|81, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 },
test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 32743, W: 377 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 1, W: 3 }, timeAcquiringMicros: { w: 18124, W: 21196 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 251ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.965-0400 m31100| 2015-07-09T13:56:39.951-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464598_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.966-0400 m31200| 2015-07-09T13:56:39.953-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464598_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.966-0400 m31202| 2015-07-09T13:56:39.953-0400 I COMMAND [repl writer worker 2] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.966-0400 m31102| 2015-07-09T13:56:39.956-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464598_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.966-0400 m31101| 2015-07-09T13:56:39.957-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464598_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.966-0400 m31200| 2015-07-09T13:56:39.957-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464598_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.967-0400 m31102| 2015-07-09T13:56:39.960-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464598_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.967-0400 m31201| 2015-07-09T13:56:39.960-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.967-0400 m31100| 2015-07-09T13:56:39.960-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_105 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.967-0400 m31101| 2015-07-09T13:56:39.961-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464598_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.967-0400 m31102| 2015-07-09T13:56:39.963-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464598_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.967-0400 m31202| 2015-07-09T13:56:39.966-0400 I COMMAND [repl writer worker 9] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.972-0400 m31200| 2015-07-09T13:56:39.972-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_76 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.976-0400 m31201| 2015-07-09T13:56:39.975-0400 I COMMAND [repl writer worker 8] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.987-0400 m31202| 2015-07-09T13:56:39.986-0400 I COMMAND [repl writer worker 8] CMD: drop db17.tmp.mrs.coll17_1436464598_24 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:39.995-0400 m31201| 2015-07-09T13:56:39.994-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.001-0400 m31202| 2015-07-09T13:56:40.001-0400 I 
COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464598_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.002-0400 m31100| 2015-07-09T13:56:40.002-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_106 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.002-0400 m31100| 2015-07-09T13:56:40.002-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_107 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.003-0400 m31100| 2015-07-09T13:56:40.003-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_108 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.003-0400 m31200| 2015-07-09T13:56:40.003-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.004-0400 m31202| 2015-07-09T13:56:40.003-0400 I COMMAND [repl writer worker 11] CMD: drop db17.tmp.mrs.coll17_1436464598_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.004-0400 m31200| 2015-07-09T13:56:40.004-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_78 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.004-0400 m31200| 2015-07-09T13:56:40.004-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_77 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.010-0400 m31202| 2015-07-09T13:56:40.010-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464598_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.021-0400 m31201| 2015-07-09T13:56:40.021-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464598_24 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.029-0400 m31201| 2015-07-09T13:56:40.028-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464598_25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.031-0400 m31201| 2015-07-09T13:56:40.031-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464598_31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.035-0400 m31201| 2015-07-09T13:56:40.034-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464598_26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.035-0400 m31202| 2015-07-09T13:56:40.035-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464598_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.039-0400 m31201| 2015-07-09T13:56:40.039-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464598_32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.043-0400 m31100| 2015-07-09T13:56:40.043-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_109 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.066-0400 m31200| 2015-07-09T13:56:40.065-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_79 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.719-0400 m31200| 2015-07-09T13:56:40.719-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464599_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.726-0400 m31200| 2015-07-09T13:56:40.726-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_76 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.726-0400 m31200| 2015-07-09T13:56:40.726-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_76 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.739-0400 m31200| 2015-07-09T13:56:40.738-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_76 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.759-0400 m31200| 2015-07-09T13:56:40.759-0400 I COMMAND [conn29] command 
db17.tmp.mrs.coll17_1436464599_28 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.759-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.760-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.760-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.760-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_28", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:212 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 42 } }, Database: { acquireCount: { r: 25, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 1, w: 15, R: 9, W: 6 }, timeAcquiringMicros: { r: 8343, w: 150774, R: 173646, W: 32037 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 796ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.792-0400 m31200| 2015-07-09T13:56:40.792-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.800-0400 m31200| 2015-07-09T13:56:40.800-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.800-0400 m31200| 2015-07-09T13:56:40.800-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.802-0400 m31200| 2015-07-09T13:56:40.801-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.806-0400 m31200| 2015-07-09T13:56:40.806-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464599_27 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.806-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.806-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.807-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.807-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_27", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7230, W: 572 } }, Database: { acquireCount: { r: 25, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 1, w: 10, R: 9, W: 9 }, timeAcquiringMicros: { r: 11048, w: 96737, R: 140014, W: 98943 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 845ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.812-0400 m31200| 2015-07-09T13:56:40.811-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464599_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.822-0400 m31200| 2015-07-09T13:56:40.821-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_77 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.822-0400 
m31200| 2015-07-09T13:56:40.821-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_77 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.825-0400 m31200| 2015-07-09T13:56:40.825-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_77 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.832-0400 m31200| 2015-07-09T13:56:40.831-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464599_33 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.832-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.832-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.832-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.833-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_33", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:212 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 8773, W: 1939 } }, Database: { acquireCount: { r: 25, w: 66, R: 24, W: 11 }, acquireWaitCount: { r: 4, w: 8, R: 12, W: 7 }, timeAcquiringMicros: { r: 33490, w: 35197, R: 77633, W: 200401 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 859ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.833-0400 m31200| 2015-07-09T13:56:40.833-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464599_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.835-0400 m31100| 2015-07-09T13:56:40.835-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.842-0400 m31200| 2015-07-09T13:56:40.842-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_78 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.843-0400 m31200| 2015-07-09T13:56:40.842-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_78 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.843-0400 m31200| 2015-07-09T13:56:40.843-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_78 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.845-0400 m31100| 2015-07-09T13:56:40.844-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_105 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.846-0400 m31100| 2015-07-09T13:56:40.845-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_105 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.847-0400 m31200| 2015-07-09T13:56:40.847-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464599_29 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.847-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.847-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.848-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.849-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_29", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 
numYields:9 reslen:212 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 19916, W: 557 } }, Database: { acquireCount: { r: 25, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 6, w: 10, R: 11, W: 7 }, timeAcquiringMicros: { r: 39177, w: 54894, R: 94964, W: 125519 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 873ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.853-0400 m31200| 2015-07-09T13:56:40.853-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464600_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.865-0400 m31200| 2015-07-09T13:56:40.865-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_79 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.866-0400 m31200| 2015-07-09T13:56:40.865-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_79 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.866-0400 m31200| 2015-07-09T13:56:40.866-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_79 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.867-0400 m31200| 2015-07-09T13:56:40.866-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464600_34 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.867-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.868-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.868-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.869-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464600_34", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:212 locks:{ Global: { acquireCount: { r: 177, w: 74, W: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 29453 } }, Database: { acquireCount: { r: 25, w: 66, R: 25, W: 11 }, acquireWaitCount: { r: 8, w: 7, R: 12, W: 5 }, timeAcquiringMicros: { r: 15397, w: 94467, R: 80625, W: 106613 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 862ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.869-0400 m31100| 2015-07-09T13:56:40.867-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_105 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.872-0400 m31100| 2015-07-09T13:56:40.871-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464599_27 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.872-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.872-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.872-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.872-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_27", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: 
{ W: 1 }, timeAcquiringMicros: { W: 418 } }, Database: { acquireCount: { r: 25, w: 66, R: 19, W: 11 }, acquireWaitCount: { w: 11, R: 8, W: 6 }, timeAcquiringMicros: { w: 152991, R: 158067, W: 24512 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 910ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.874-0400 m31200| 2015-07-09T13:56:40.873-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_80 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.899-0400 m31100| 2015-07-09T13:56:40.898-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464599_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.909-0400 m31100| 2015-07-09T13:56:40.908-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_107 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.909-0400 m31100| 2015-07-09T13:56:40.909-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_107 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.912-0400 m31100| 2015-07-09T13:56:40.912-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_107 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.913-0400 m31100| 2015-07-09T13:56:40.912-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464599_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.918-0400 m31100| 2015-07-09T13:56:40.918-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_108 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.919-0400 m31100| 2015-07-09T13:56:40.919-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_108 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.920-0400 m31100| 2015-07-09T13:56:40.920-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464599_33 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.920-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.920-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.920-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.921-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_33", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:18 reslen:212 locks:{ Global: { acquireCount: { r: 187, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3620, w: 6937, W: 677 } }, Database: { acquireCount: { r: 25, w: 66, R: 30, W: 11 }, acquireWaitCount: { r: 1, w: 7, R: 11, W: 9 }, timeAcquiringMicros: { r: 28181, w: 86660, R: 103751, W: 54972 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 947ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.922-0400 m31200| 2015-07-09T13:56:40.921-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.923-0400 m31100| 2015-07-09T13:56:40.922-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_108 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.923-0400 m31100| 2015-07-09T13:56:40.923-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464599_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.923-0400 m31200| 
2015-07-09T13:56:40.923-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_81 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.935-0400 m31100| 2015-07-09T13:56:40.935-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_106 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.936-0400 m31100| 2015-07-09T13:56:40.935-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_106 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.937-0400 m31100| 2015-07-09T13:56:40.937-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464599_29 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.937-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.938-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.938-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.938-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_29", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:212 locks:{ Global: { acquireCount: { r: 183, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 10533, w: 22235, W: 1222 } }, Database: { acquireCount: { r: 25, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 2, w: 8, R: 9, W: 8 }, timeAcquiringMicros: { r: 29077, w: 51419, R: 80984, W: 129046 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 963ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.939-0400 m31100| 2015-07-09T13:56:40.937-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_106 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.939-0400 m31200| 2015-07-09T13:56:40.938-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_82 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.939-0400 m31100| 2015-07-09T13:56:40.938-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464599_28 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.939-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.939-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.940-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.940-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464599_28", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:18 reslen:212 locks:{ Global: { acquireCount: { r: 187, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 17957, w: 5944, W: 2880 } }, Database: { acquireCount: { r: 25, w: 66, R: 30, W: 11 }, acquireWaitCount: { r: 3, w: 8, R: 10, W: 9 }, timeAcquiringMicros: { r: 39951, w: 69351, R: 64071, W: 128571 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 975ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.940-0400 m31100| 2015-07-09T13:56:40.938-0400 I COMMAND [conn60] CMD: drop 
db17.tmp.mrs.coll17_1436464600_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.940-0400 m31200| 2015-07-09T13:56:40.940-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_83 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.945-0400 m31200| 2015-07-09T13:56:40.945-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_80 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.946-0400 m31200| 2015-07-09T13:56:40.945-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_80 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.946-0400 m31200| 2015-07-09T13:56:40.945-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_80 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.946-0400 m31100| 2015-07-09T13:56:40.946-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.949-0400 m31201| 2015-07-09T13:56:40.948-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.950-0400 m31202| 2015-07-09T13:56:40.950-0400 I COMMAND [repl writer worker 7] CMD: drop db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.958-0400 m31100| 2015-07-09T13:56:40.957-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_109 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.959-0400 m31100| 2015-07-09T13:56:40.957-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_109 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.959-0400 m31100| 2015-07-09T13:56:40.959-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_109 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.976-0400 m31100| 2015-07-09T13:56:40.975-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464600_34 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.976-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.976-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.976-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.977-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464600_34", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:212 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 11195, w: 23250, W: 33 } }, Database: { acquireCount: { r: 25, w: 66, R: 29, W: 11 }, acquireWaitCount: { r: 5, w: 5, R: 8, W: 6 }, timeAcquiringMicros: { r: 26956, w: 83990, R: 30411, W: 117833 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 971ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.977-0400 m31200| 2015-07-09T13:56:40.975-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:40.977-0400 m31200| 2015-07-09T13:56:40.977-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.003-0400 m31201| 2015-07-09T13:56:41.002-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:41.005-0400 m31202| 2015-07-09T13:56:41.004-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.054-0400 m31100| 2015-07-09T13:56:41.054-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_110 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.055-0400 m31200| 2015-07-09T13:56:41.055-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_85 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.103-0400 m31102| 2015-07-09T13:56:41.103-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.105-0400 m31101| 2015-07-09T13:56:41.104-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464599_27 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.213-0400 m31200| 2015-07-09T13:56:41.213-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.222-0400 m31200| 2015-07-09T13:56:41.222-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_83 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.223-0400 m31200| 2015-07-09T13:56:41.223-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.224-0400 m31200| 2015-07-09T13:56:41.223-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_83 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.226-0400 m31100| 2015-07-09T13:56:41.225-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464601_30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.230-0400 m31100| 2015-07-09T13:56:41.229-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_110 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.230-0400 m31100| 2015-07-09T13:56:41.229-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_110 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.232-0400 m31100| 2015-07-09T13:56:41.231-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_110 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.233-0400 m31100| 2015-07-09T13:56:41.231-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464601_30 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.233-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.233-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.233-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.234-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_30", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 177ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.234-0400 m31200| 2015-07-09T13:56:41.232-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_82 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.234-0400 m31200| 2015-07-09T13:56:41.232-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_82 
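[editor's note] The truncated mapper/reducer/finalizer fragments logged above belong to the map_reduce_replace concurrency workload driving this test. Below is a minimal mongo-shell sketch of the call shape those entries imply; only the hasOwnProperty guard, the pass-through finalizer, and the out/query/sort options appear verbatim in the log, so the emit payload and the reducer aggregation here are illustrative assumptions, not the workload's actual source.

    // Hypothetical reconstruction of the mapReduce call visible in the log above.
    // Confirmed by the log: the hasOwnProperty guard, finalize returning
    // reducedValue unchanged, out: { replace: ... }, the $exists query, and
    // sort: { _id: -1 }. The emit payload and reducer body are assumptions.
    var mapper = function mapper() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, 1); // assumed payload; the log truncates the emit
        }
    };
    var reducer = function reducer(key, values) {
        var res = {};
        res.count = values.length; // assumed aggregation; the log truncates here
        return res;
    };
    var finalizer = function finalizer(key, reducedValue) {
        return reducedValue;
    };
    db.coll17.mapReduce(mapper, reducer, {
        finalize: finalizer,
        out: { replace: 'map_reduce_replace1' },
        query: { key: { $exists: true }, value: { $exists: true } },
        sort: { _id: -1 }
    });

When mongos runs this against the sharded coll17, each shard first executes the mapReduce with shardedFirstPass: true into a per-shard tmp.mrs.coll17_* collection; a mapreduce.shardedfinish pass on one shard then merges those partial results into the map_reduce_replace* output collection, after which the temporary collections are dropped and the drops replicate to the secondaries (m31101/m31102, m31201/m31202) — exactly the drop/command sequence the surrounding entries record.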
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.235-0400 m31200| 2015-07-09T13:56:41.232-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.246-0400 m31200| 2015-07-09T13:56:41.245-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.246-0400 m31200| 2015-07-09T13:56:41.246-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.247-0400 m31200| 2015-07-09T13:56:41.246-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.257-0400 m31200| 2015-07-09T13:56:41.257-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_81 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.257-0400 m31200| 2015-07-09T13:56:41.257-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_81 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.258-0400 m31200| 2015-07-09T13:56:41.257-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_81 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.258-0400 m31200| 2015-07-09T13:56:41.258-0400 I COMMAND [conn41] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.259-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.259-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.259-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.259-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.259-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.262-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464599_33", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464599_33", timeMillis: 936, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|101, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464599_33", timeMillis: 849, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|87, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 22020, W: 33730 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 11 }, timeAcquiringMicros: { w: 155438 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: 
{ acquireCount: { w: 23 } } } protocol:op_command 334ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.262-0400 m31200| 2015-07-09T13:56:41.258-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_82 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.262-0400 m31200| 2015-07-09T13:56:41.258-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_84 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.262-0400 m31200| 2015-07-09T13:56:41.258-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_83 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.262-0400 m31100| 2015-07-09T13:56:41.259-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464599_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.263-0400 m31200| 2015-07-09T13:56:41.263-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464599_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.263-0400 m31201| 2015-07-09T13:56:41.263-0400 I COMMAND [repl writer worker 13] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.265-0400 m31101| 2015-07-09T13:56:41.264-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464599_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.265-0400 m31102| 2015-07-09T13:56:41.265-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464599_33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.266-0400 m31202| 2015-07-09T13:56:41.266-0400 I COMMAND [repl writer worker 9] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.268-0400 m31201| 2015-07-09T13:56:41.268-0400 I COMMAND [repl writer worker 8] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.269-0400 m31200| 2015-07-09T13:56:41.268-0400 I COMMAND [conn39] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.269-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.269-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.270-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.270-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.270-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.272-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464600_34", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464600_34", timeMillis: 953, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|110, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464600_34", timeMillis: 861, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|110, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, 
reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 11539, W: 19905 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 11, W: 3 }, timeAcquiringMicros: { w: 88188, W: 17358 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 291ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.272-0400 m31100| 2015-07-09T13:56:41.269-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464600_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.273-0400 m31200| 2015-07-09T13:56:41.268-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.273-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.273-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.274-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.274-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.274-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.276-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464599_28", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464599_28", timeMillis: 972, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|106, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464599_28", timeMillis: 763, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|26, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 41118, W: 599 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 12, W: 3 }, timeAcquiringMicros: { w: 96007, W: 93779 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 329ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.277-0400 m31100| 2015-07-09T13:56:41.271-0400 I COMMAND [conn36] CMD: drop 
db17.tmp.mrs.coll17_1436464599_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.278-0400 m31200| 2015-07-09T13:56:41.269-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.278-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.279-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.279-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.279-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.280-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.281-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464599_29", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464599_29", timeMillis: 946, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|104, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464599_29", timeMillis: 869, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464600000|102, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 32751, W: 9574 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 12, W: 3 }, timeAcquiringMicros: { w: 95709, W: 66360 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 331ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.281-0400 m31200| 2015-07-09T13:56:41.271-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464600_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.282-0400 m31100| 2015-07-09T13:56:41.273-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464599_29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.282-0400 m31102| 2015-07-09T13:56:41.273-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464600_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.282-0400 m31200| 2015-07-09T13:56:41.274-0400 I COMMAND [conn34] CMD: drop db17.tmp.mrs.coll17_1436464599_28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.282-0400 m31101| 2015-07-09T13:56:41.274-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464600_34 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.283-0400 m31202| 2015-07-09T13:56:41.274-0400 I COMMAND [repl 
writer worker 10] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.283-0400 m31200| 2015-07-09T13:56:41.278-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464599_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.283-0400 m31101| 2015-07-09T13:56:41.278-0400 I COMMAND [repl writer worker 11] CMD: drop db17.tmp.mrs.coll17_1436464599_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.284-0400 m31102| 2015-07-09T13:56:41.280-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464599_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.285-0400 m31201| 2015-07-09T13:56:41.281-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.285-0400 m31101| 2015-07-09T13:56:41.284-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464599_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.289-0400 m31102| 2015-07-09T13:56:41.289-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464599_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.290-0400 m31202| 2015-07-09T13:56:41.289-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.290-0400 m31100| 2015-07-09T13:56:41.290-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.292-0400 m31201| 2015-07-09T13:56:41.292-0400 I COMMAND [repl writer worker 2] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.294-0400 m31200| 2015-07-09T13:56:41.294-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_86
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.298-0400 m31202| 2015-07-09T13:56:41.298-0400 I COMMAND [repl writer worker 4] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.303-0400 m31200| 2015-07-09T13:56:41.302-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_88
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.303-0400 m31200| 2015-07-09T13:56:41.303-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_87
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.304-0400 m31201| 2015-07-09T13:56:41.304-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464599_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.309-0400 m31202| 2015-07-09T13:56:41.309-0400 I COMMAND [repl writer worker 5] CMD: drop db17.tmp.mrs.coll17_1436464599_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.314-0400 m31201| 2015-07-09T13:56:41.314-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464600_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.315-0400 m31202| 2015-07-09T13:56:41.315-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464600_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.316-0400 m31201| 2015-07-09T13:56:41.316-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464599_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.318-0400 m31100| 2015-07-09T13:56:41.318-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.319-0400 m31100| 2015-07-09T13:56:41.318-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.319-0400 m31100| 2015-07-09T13:56:41.318-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.319-0400 m31202| 2015-07-09T13:56:41.319-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464599_28
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.320-0400 m31201| 2015-07-09T13:56:41.319-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464599_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.321-0400 m31202| 2015-07-09T13:56:41.320-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464599_29
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.328-0400 m31200| 2015-07-09T13:56:41.328-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_89
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.795-0400 m31200| 2015-07-09T13:56:41.795-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464601_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.799-0400 m31200| 2015-07-09T13:56:41.798-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_85
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.799-0400 m31200| 2015-07-09T13:56:41.798-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_85
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.801-0400 m31200| 2015-07-09T13:56:41.801-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_85
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.805-0400 m31200| 2015-07-09T13:56:41.805-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464601_30 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.805-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.806-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.806-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.807-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_30", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 40981 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 26, R: 12, W: 9 }, timeAcquiringMicros: { w: 439904, R: 31005, W: 9965 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 751ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.807-0400 m31200| 2015-07-09T13:56:41.806-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_90
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.903-0400 m31200| 2015-07-09T13:56:41.903-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.908-0400 m31200| 2015-07-09T13:56:41.908-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_86
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.913-0400 m31200| 2015-07-09T13:56:41.909-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_86
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.913-0400 m31200| 2015-07-09T13:56:41.912-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_86
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.914-0400 m31200| 2015-07-09T13:56:41.913-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464601_35 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.914-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.914-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.914-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.915-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_35", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:212 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 2626, W: 1570 } }, Database: { acquireCount: { r: 25, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 1, w: 17, R: 17, W: 8 }, timeAcquiringMicros: { r: 4397, w: 72510, R: 123196, W: 47920 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 623ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.916-0400 m31200| 2015-07-09T13:56:41.914-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.922-0400 m31200| 2015-07-09T13:56:41.922-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_90
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.923-0400 m31200| 2015-07-09T13:56:41.922-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_90
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.923-0400 m31200| 2015-07-09T13:56:41.922-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_90
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.923-0400 m31200| 2015-07-09T13:56:41.923-0400 I COMMAND [conn38] command db17.map_reduce_replace4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.924-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.924-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.924-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.924-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.924-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.925-0400 m31200| }, out: { replace: "map_reduce_replace4" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464601_30", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464601_30", timeMillis: 175, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464601000|22, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464601_30", timeMillis: 745, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464601000|121, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7064, W: 2018 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 9, W: 2 }, timeAcquiringMicros: { w: 35360, W: 11107 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 117ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.926-0400 m31100| 2015-07-09T13:56:41.926-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464601_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.928-0400 m31201| 2015-07-09T13:56:41.928-0400 I COMMAND [repl writer worker 2] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.934-0400 m31200| 2015-07-09T13:56:41.933-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.936-0400 m31202| 2015-07-09T13:56:41.935-0400 I COMMAND [repl writer worker 1] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.938-0400 m31200| 2015-07-09T13:56:41.937-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_87
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.939-0400 m31200| 2015-07-09T13:56:41.939-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_87
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.940-0400 m31200| 2015-07-09T13:56:41.940-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.944-0400 m31200| 2015-07-09T13:56:41.944-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464601_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.948-0400 m31200| 2015-07-09T13:56:41.946-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_88
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.948-0400 m31101| 2015-07-09T13:56:41.946-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464601_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.949-0400 m31102| 2015-07-09T13:56:41.946-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464601_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.949-0400 m31200| 2015-07-09T13:56:41.947-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_88
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.950-0400 m31200| 2015-07-09T13:56:41.950-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_87
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.951-0400 m31200| 2015-07-09T13:56:41.950-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_88
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.952-0400 m31200| 2015-07-09T13:56:41.951-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464601_31 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.952-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.952-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.953-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.953-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_31", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:212 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 4, W: 1 }, timeAcquiringMicros: { r: 6292, w: 17982, W: 327 } }, Database: { acquireCount: { r: 25, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 3, w: 8, R: 18, W: 8 }, timeAcquiringMicros: { r: 8605, w: 121484, R: 59978, W: 68127 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 656ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.954-0400 m31200| 2015-07-09T13:56:41.952-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464601_36 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.954-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.954-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.954-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.955-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_36", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 17283, w: 10706, W: 593 } }, Database: { acquireCount: { r: 25, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 1, w: 11, R: 19, W: 9 }, timeAcquiringMicros: { r: 6042, w: 76201, R: 122916, W: 53466 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 656ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.955-0400 m31200| 2015-07-09T13:56:41.954-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_91
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.967-0400 m31100| 2015-07-09T13:56:41.966-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.968-0400 m31201| 2015-07-09T13:56:41.967-0400 I COMMAND [repl writer worker 8] CMD: drop db17.tmp.mrs.coll17_1436464601_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:41.972-0400 m31202| 2015-07-09T13:56:41.972-0400 I COMMAND [repl writer worker 11] CMD: drop db17.tmp.mrs.coll17_1436464601_30
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.025-0400 m31200| 2015-07-09T13:56:42.025-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.031-0400 m31200| 2015-07-09T13:56:42.030-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_89
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.031-0400 m31200| 2015-07-09T13:56:42.031-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_89
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.034-0400 m31200| 2015-07-09T13:56:42.032-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_89
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.044-0400 m31200| 2015-07-09T13:56:42.044-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464601_32 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.044-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.045-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.045-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.046-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_32", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { w: 7 }, timeAcquiringMicros: { w: 30501 } }, Database: { acquireCount: { r: 25, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 6, w: 15, R: 19, W: 7 }, timeAcquiringMicros: { r: 39921, w: 102169, R: 54005, W: 113876 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 740ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.104-0400 m31100| 2015-07-09T13:56:42.103-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.109-0400 m31100| 2015-07-09T13:56:42.109-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.109-0400 m31100| 2015-07-09T13:56:42.109-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.120-0400 m31100| 2015-07-09T13:56:42.120-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.122-0400 m31100| 2015-07-09T13:56:42.121-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464601_35 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.122-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.122-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.122-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.124-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_35", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:212 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 19751 } }, Database: { acquireCount: { r: 25, w: 66, R: 24, W: 11 }, acquireWaitCount: { w: 21, R: 5, W: 6 }, timeAcquiringMicros: { w: 161980, R: 157614, W: 11754 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 832ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.130-0400 m31200| 2015-07-09T13:56:42.124-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_92
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.131-0400 m31100| 2015-07-09T13:56:42.130-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.140-0400 m31100| 2015-07-09T13:56:42.139-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.141-0400 m31100| 2015-07-09T13:56:42.139-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.150-0400 m31100| 2015-07-09T13:56:42.150-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.156-0400 m31100| 2015-07-09T13:56:42.154-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464601_32 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.156-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.157-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.157-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.157-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_32", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:212 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 1858, w: 22576, W: 3755 } }, Database: { acquireCount: { r: 25, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 4, w: 24, R: 4, W: 8 }, timeAcquiringMicros: { r: 14795, w: 188586, R: 57774, W: 54521 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 851ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.158-0400 m31200| 2015-07-09T13:56:42.155-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.173-0400 m31100| 2015-07-09T13:56:42.172-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.183-0400 m31100| 2015-07-09T13:56:42.182-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.183-0400 m31100| 2015-07-09T13:56:42.182-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.184-0400 m31100| 2015-07-09T13:56:42.184-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.185-0400 m31100| 2015-07-09T13:56:42.184-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464601_31 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.185-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.185-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.186-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.189-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_31", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:212 locks:{ Global: { acquireCount: { r: 179, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 18162, w: 14394, W: 4003 } }, Database: { acquireCount: { r: 25, w: 66, R: 26, W: 11 }, acquireWaitCount: { r: 5, w: 19, R: 7, W: 7 }, timeAcquiringMicros: { r: 24025, w: 117308, R: 77328, W: 67390 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 889ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.190-0400 m31200| 2015-07-09T13:56:42.186-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_94
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.190-0400 m31100| 2015-07-09T13:56:42.188-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.193-0400 m31100| 2015-07-09T13:56:42.192-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.193-0400 m31100| 2015-07-09T13:56:42.193-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.194-0400 m31100| 2015-07-09T13:56:42.194-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.195-0400 m31100| 2015-07-09T13:56:42.194-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464601_36 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.195-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.195-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.196-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.196-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_36", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:212 locks:{ Global: { acquireCount: { r: 183, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 22697, w: 15892, W: 178 } }, Database: { acquireCount: { r: 25, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 7, w: 20, R: 7, W: 9 }, timeAcquiringMicros: { r: 24274, w: 164740, R: 13872, W: 82487 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 899ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.197-0400 m31200| 2015-07-09T13:56:42.196-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_95
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.207-0400 m31100| 2015-07-09T13:56:42.207-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.217-0400 m31100| 2015-07-09T13:56:42.216-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.217-0400 m31100| 2015-07-09T13:56:42.217-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.220-0400 m31100| 2015-07-09T13:56:42.220-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.221-0400 m31100| 2015-07-09T13:56:42.221-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464601_33 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.221-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.222-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.222-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.222-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_33", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2 }, timeAcquiringMicros: { r: 13844, w: 16771 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 8, R: 11, W: 6 }, timeAcquiringMicros: { w: 12081, R: 29288, W: 6908 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 266ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.254-0400 m31200| 2015-07-09T13:56:42.253-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.262-0400 m31200| 2015-07-09T13:56:42.262-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_91
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.263-0400 m31200| 2015-07-09T13:56:42.262-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_91
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.264-0400 m31200| 2015-07-09T13:56:42.263-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_91
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.267-0400 m31200| 2015-07-09T13:56:42.266-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464601_33 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.267-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.267-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.267-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.268-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464601_33", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 2522, W: 1048 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 3, R: 7, W: 7 }, timeAcquiringMicros: { r: 81089, w: 28718, R: 3806, W: 7039 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 313ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.274-0400 m31200| 2015-07-09T13:56:42.273-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_96
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.330-0400 m31200| 2015-07-09T13:56:42.329-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.337-0400 m31200| 2015-07-09T13:56:42.336-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_92
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.337-0400 m31200| 2015-07-09T13:56:42.337-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_92
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.338-0400 m31200| 2015-07-09T13:56:42.337-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_92
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.340-0400 m31202| 2015-07-09T13:56:42.339-0400 I COMMAND [repl writer worker 13] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.340-0400 m31201| 2015-07-09T13:56:42.339-0400 I COMMAND [repl writer worker 7] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.342-0400 m31200| 2015-07-09T13:56:42.342-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.342-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.342-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.342-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.343-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.343-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.344-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464601_35", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464601_35", timeMillis: 820, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|35, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464601_35", timeMillis: 619, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464601000|179, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 8423, W: 231 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 7, W: 2 }, timeAcquiringMicros: { w: 92784, W: 708 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 218ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.344-0400 m31100| 2015-07-09T13:56:42.343-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.346-0400 m31200| 2015-07-09T13:56:42.346-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.347-0400 m31102| 2015-07-09T13:56:42.346-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.347-0400 m31101| 2015-07-09T13:56:42.347-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.349-0400 m31200| 2015-07-09T13:56:42.349-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.357-0400 m31200| 2015-07-09T13:56:42.357-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_94
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.358-0400 m31200| 2015-07-09T13:56:42.357-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_94
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.358-0400 m31200| 2015-07-09T13:56:42.357-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.363-0400 m31200| 2015-07-09T13:56:42.363-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_95
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.364-0400 m31200| 2015-07-09T13:56:42.363-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_95
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.365-0400 m31200| 2015-07-09T13:56:42.363-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.370-0400 m31200| 2015-07-09T13:56:42.370-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.370-0400 m31200| 2015-07-09T13:56:42.370-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.371-0400 m31200| 2015-07-09T13:56:42.370-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_95
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.371-0400 m31200| 2015-07-09T13:56:42.371-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_94
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.371-0400 m31200| 2015-07-09T13:56:42.371-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.372-0400 m31200| 2015-07-09T13:56:42.371-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.374-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.374-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.374-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.374-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.374-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.376-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464601_36", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464601_36", timeMillis: 897, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|85, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464601_36", timeMillis: 651, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464601000|207, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 16088, W: 10002 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 3, W: 4 }, timeAcquiringMicros: { w: 18441, W: 14013 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 175ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.376-0400 m31100| 2015-07-09T13:56:42.371-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.377-0400 m31200| 2015-07-09T13:56:42.371-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.377-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.378-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.378-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.378-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.378-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.380-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464601_31", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464601_31", timeMillis: 887, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|80, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464601_31", timeMillis: 643, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464601000|206, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 21209, W: 1366 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 4, W: 4 }, timeAcquiringMicros: { w: 42041, W: 3304 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 185ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.381-0400 m31100| 2015-07-09T13:56:42.372-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.381-0400 m31200| 2015-07-09T13:56:42.371-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.381-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.381-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.381-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.382-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.382-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.384-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464601_32", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464601_32", timeMillis: 836, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|61, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464601_32", timeMillis: 727, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|16, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 14364, W: 17158 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 3, W: 4 }, timeAcquiringMicros: { w: 48464, W: 7676 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 216ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.385-0400 m31100| 2015-07-09T13:56:42.373-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.386-0400 m31202| 2015-07-09T13:56:42.374-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.387-0400 m31200| 2015-07-09T13:56:42.374-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_97
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.387-0400 m31201| 2015-07-09T13:56:42.376-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464601_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.388-0400 m31202| 2015-07-09T13:56:42.377-0400 I COMMAND [repl writer worker 1] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.388-0400 m31200| 2015-07-09T13:56:42.377-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.388-0400 m31201| 2015-07-09T13:56:42.381-0400 I COMMAND [repl writer worker 8] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.388-0400 m31102| 2015-07-09T13:56:42.381-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.389-0400 m31200| 2015-07-09T13:56:42.384-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.389-0400 m31102| 2015-07-09T13:56:42.386-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.389-0400 m31202| 2015-07-09T13:56:42.387-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.391-0400 m31201| 2015-07-09T13:56:42.390-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.395-0400 m31200| 2015-07-09T13:56:42.395-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.396-0400 m31100| 2015-07-09T13:56:42.395-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.396-0400 m31100| 2015-07-09T13:56:42.396-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.400-0400 m31201| 2015-07-09T13:56:42.400-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.404-0400 m31202| 2015-07-09T13:56:42.404-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.414-0400 m31201| 2015-07-09T13:56:42.414-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.420-0400 m31200| 2015-07-09T13:56:42.420-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_98
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.420-0400 m31201| 2015-07-09T13:56:42.420-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.424-0400 m31202| 2015-07-09T13:56:42.424-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.428-0400 m31202| 2015-07-09T13:56:42.428-0400 I COMMAND [repl writer worker 5] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.430-0400 m31100| 2015-07-09T13:56:42.430-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.431-0400 m31200| 2015-07-09T13:56:42.430-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.436-0400 m31201| 2015-07-09T13:56:42.436-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.438-0400 m31200| 2015-07-09T13:56:42.438-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.442-0400 m31101| 2015-07-09T13:56:42.441-0400 I COMMAND [repl writer worker 11] CMD: drop db17.tmp.mrs.coll17_1436464601_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.445-0400 m31202| 2015-07-09T13:56:42.445-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.446-0400 m31101| 2015-07-09T13:56:42.446-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464601_31
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.447-0400 m31102| 2015-07-09T13:56:42.446-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.456-0400 m31101| 2015-07-09T13:56:42.456-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464601_32
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.471-0400 m31100| 2015-07-09T13:56:42.471-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.596-0400 m31200| 2015-07-09T13:56:42.595-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.612-0400 m31200| 2015-07-09T13:56:42.612-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_96
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.613-0400 m31200| 2015-07-09T13:56:42.613-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_96
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.614-0400 m31202| 2015-07-09T13:56:42.614-0400 I COMMAND [repl writer worker 4] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.623-0400 m31200| 2015-07-09T13:56:42.623-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_96
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.635-0400 m31201| 2015-07-09T13:56:42.635-0400 I COMMAND [repl writer worker 4] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.640-0400 m31200| 2015-07-09T13:56:42.639-0400 I COMMAND [conn38] command db17.map_reduce_replace4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.640-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.640-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.640-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.641-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.641-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.642-0400 m31200| }, out: { replace: "map_reduce_replace4" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464601_33", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464601_33", timeMillis: 262, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|106, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464601_33", timeMillis: 309, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|61, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 23235, W: 77379 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 10, W: 3 }, timeAcquiringMicros: { w: 123273, W: 25443 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 371ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.643-0400 m31100| 2015-07-09T13:56:42.642-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.647-0400 m31200| 2015-07-09T13:56:42.647-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.649-0400 m31101| 2015-07-09T13:56:42.649-0400 I COMMAND [repl writer worker 8] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.649-0400 m31102| 2015-07-09T13:56:42.649-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.656-0400 m31201| 2015-07-09T13:56:42.656-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.656-0400 m31202| 2015-07-09T13:56:42.656-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464601_33
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.658-0400 m31100| 2015-07-09T13:56:42.658-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_120
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.659-0400 m31200| 2015-07-09T13:56:42.659-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.953-0400 m31200| 2015-07-09T13:56:42.953-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.958-0400 m31200| 2015-07-09T13:56:42.958-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_97
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.959-0400 m31200| 2015-07-09T13:56:42.959-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_97
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.960-0400 m31200| 2015-07-09T13:56:42.960-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_97
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.970-0400 m31200| 2015-07-09T13:56:42.969-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464602_37 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.970-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.970-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.970-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.971-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_37", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 55080, W: 68 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 17, R: 11, W: 3 }, timeAcquiringMicros: { w: 239801, R: 76076, W: 3346 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 597ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.973-0400 m31100| 2015-07-09T13:56:42.973-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.980-0400 m31100| 2015-07-09T13:56:42.980-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.981-0400 m31100| 2015-07-09T13:56:42.980-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.982-0400 m31100| 2015-07-09T13:56:42.982-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.996-0400 m31100| 2015-07-09T13:56:42.995-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464602_37 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.996-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.996-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.996-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:42.997-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_37", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 445 } }, Database: { acquireCount: { r: 25, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 1, w: 19, R: 11, W: 6 }, timeAcquiringMicros: { r: 10007, w: 237108, R: 123727, W: 18167 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 623ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.004-0400 m31200| 2015-07-09T13:56:43.004-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.011-0400 m31200| 2015-07-09T13:56:43.010-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.012-0400 m31200| 2015-07-09T13:56:43.011-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.013-0400 m31100| 2015-07-09T13:56:43.013-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464602_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.018-0400 m31200| 2015-07-09T13:56:43.017-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_100
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.020-0400 m31200| 2015-07-09T13:56:43.019-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464602_35 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.020-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.020-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.020-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.020-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_35", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 30528, w: 15737, W: 744 } }, Database: { acquireCount: { r: 25, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 1, w: 14, R: 13, W: 8 }, timeAcquiringMicros: { r: 8362, w: 97718, R: 60312, W: 58757 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 590ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.022-0400 m31200| 2015-07-09T13:56:43.022-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_102
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.022-0400 m31100| 2015-07-09T13:56:43.022-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.023-0400 m31100| 2015-07-09T13:56:43.022-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.024-0400 m31100| 2015-07-09T13:56:43.023-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.030-0400 m31100| 2015-07-09T13:56:43.030-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464602_38 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.030-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.031-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.031-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.031-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_38", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 7553, W: 715 } }, Database: { acquireCount: { r: 25, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 4, w: 14, R: 17, W: 7 }, timeAcquiringMicros: { r: 7392, w: 85408, R: 126987, W: 68475 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 641ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.033-0400 m31100| 2015-07-09T13:56:43.032-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.038-0400 m31100| 2015-07-09T13:56:43.037-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.038-0400 m31100| 2015-07-09T13:56:43.037-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.040-0400 m31100| 2015-07-09T13:56:43.039-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.042-0400 m31100| 2015-07-09T13:56:43.040-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464602_35 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.042-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.043-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.043-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.044-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_35", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:212 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 17408, W: 968 } }, Database: { acquireCount: { r: 25, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 6, w: 11, R: 16, W: 7 }, timeAcquiringMicros: { r: 5101, w: 67916, R: 114264, W: 70030 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 612ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.044-0400 m31200| 2015-07-09T13:56:43.042-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.046-0400 m31100| 2015-07-09T13:56:43.046-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.058-0400 m31100| 2015-07-09T13:56:43.057-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.058-0400 m31100| 2015-07-09T13:56:43.057-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.058-0400 m31100| 2015-07-09T13:56:43.058-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.059-0400 m31100| 2015-07-09T13:56:43.059-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464602_34 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.059-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.059-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.059-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.060-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_34", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:212 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 24781, W: 283 } }, Database: { acquireCount: { r: 25, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 7, w: 13, R: 16, W: 9 }, timeAcquiringMicros: { r: 17843, w: 102725, R: 85315, W: 63037 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 646ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.062-0400 m31200| 2015-07-09T13:56:43.061-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.064-0400 m31100| 2015-07-09T13:56:43.064-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464602_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.067-0400 m31200| 2015-07-09T13:56:43.067-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.067-0400 m31200| 2015-07-09T13:56:43.067-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.068-0400 m31200| 2015-07-09T13:56:43.068-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_99
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.073-0400 m31200| 2015-07-09T13:56:43.072-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464602_34 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.073-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.073-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.074-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.075-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_34", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:212 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 34295, w: 8066, W: 15176 } }, Database: { acquireCount: { r: 25, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 5, w: 13, R: 15, W: 7 }, timeAcquiringMicros: { r: 10044, w: 87975, R: 114964, W: 76845 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 660ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.075-0400 m31200| 2015-07-09T13:56:43.074-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.076-0400 m31100| 2015-07-09T13:56:43.076-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_120
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.076-0400 m31100| 2015-07-09T13:56:43.076-0400 I COMMAND [conn72] CMD:
drop db17.tmp.mr.coll17_120 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.080-0400 m31100| 2015-07-09T13:56:43.079-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_120 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.080-0400 m31100| 2015-07-09T13:56:43.079-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464602_36 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.081-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.081-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.081-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.082-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_36", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:212 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 15659, w: 16672 } }, Database: { acquireCount: { r: 25, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 2, w: 12, R: 14, W: 5 }, timeAcquiringMicros: { r: 304, w: 44724, R: 16336, W: 54870 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 424ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.103-0400 m31200| 2015-07-09T13:56:43.103-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464602_38 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.109-0400 m31200| 2015-07-09T13:56:43.109-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_98 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.110-0400 m31200| 2015-07-09T13:56:43.109-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_98 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.110-0400 m31200| 2015-07-09T13:56:43.110-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_98 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.111-0400 m31200| 2015-07-09T13:56:43.111-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464602_38 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.112-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.112-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.112-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.113-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_38", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:212 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 1 }, timeAcquiringMicros: { r: 58802, w: 6759 } }, Database: { acquireCount: { r: 25, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 10, w: 20, R: 16, W: 9 }, timeAcquiringMicros: { r: 77798, w: 83114, R: 105834, W: 48391 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 722ms [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:43.115-0400 m31200| 2015-07-09T13:56:43.113-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_105 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.147-0400 m31200| 2015-07-09T13:56:43.147-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464602_36 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.153-0400 m31200| 2015-07-09T13:56:43.153-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_101 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.153-0400 m31200| 2015-07-09T13:56:43.153-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_101 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.154-0400 m31200| 2015-07-09T13:56:43.154-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_101 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.166-0400 m31200| 2015-07-09T13:56:43.165-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464602_36 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.166-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.166-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.166-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.167-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464602_36", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 28716 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 17, R: 11, W: 7 }, timeAcquiringMicros: { r: 3168, w: 127499, R: 24448, W: 67641 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.186-0400 m31200| 2015-07-09T13:56:43.186-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.192-0400 m31200| 2015-07-09T13:56:43.191-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.192-0400 m31200| 2015-07-09T13:56:43.192-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.192-0400 m31200| 2015-07-09T13:56:43.192-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_102 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.193-0400 m31200| 2015-07-09T13:56:43.192-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.193-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.194-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.194-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.194-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.195-0400 m31200| return reducedValue; 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.197-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464602_37", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464602_37", timeMillis: 608, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|153, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464602_37", timeMillis: 587, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464602000|183, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 57, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 29700, W: 2291 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { w: 10, W: 2 }, timeAcquiringMicros: { w: 54530, W: 529 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 195ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.197-0400 m31100| 2015-07-09T13:56:43.193-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.197-0400 m31200| 2015-07-09T13:56:43.196-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.197-0400 m31101| 2015-07-09T13:56:43.196-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.197-0400 m31102| 2015-07-09T13:56:43.197-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.198-0400 m31200| 2015-07-09T13:56:43.197-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.198-0400 m31201| 2015-07-09T13:56:43.198-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.205-0400 m31200| 2015-07-09T13:56:43.204-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.205-0400 m31200| 2015-07-09T13:56:43.205-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.206-0400 m31202| 2015-07-09T13:56:43.206-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.210-0400 m31201| 2015-07-09T13:56:43.210-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.211-0400 m31200| 2015-07-09T13:56:43.211-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.213-0400 m31100| 2015-07-09T13:56:43.213-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.214-0400 m31200| 2015-07-09T13:56:43.214-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_107
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.216-0400 m31201| 2015-07-09T13:56:43.216-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.220-0400 m31200| 2015-07-09T13:56:43.220-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_106
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.224-0400 m31202| 2015-07-09T13:56:43.223-0400 I COMMAND [repl writer worker 1] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.227-0400 m31200| 2015-07-09T13:56:43.226-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.228-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.228-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.228-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.228-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.229-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.230-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464602_35", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464602_35", timeMillis: 609, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|28, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464602_35", timeMillis: 582, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|10, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 15419, W: 4540 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 7, W: 4 }, timeAcquiringMicros: { w: 52411, W: 10749 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 184ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.231-0400 m31100| 2015-07-09T13:56:43.227-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.233-0400 m31200| 2015-07-09T13:56:43.232-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.236-0400 m31202| 2015-07-09T13:56:43.235-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464602_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.238-0400 m31200| 2015-07-09T13:56:43.237-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.238-0400 m31102| 2015-07-09T13:56:43.238-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.238-0400 m31101| 2015-07-09T13:56:43.238-0400 I COMMAND [repl writer worker 3] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.245-0400 m31200| 2015-07-09T13:56:43.245-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.246-0400 m31200| 2015-07-09T13:56:43.245-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.247-0400 m31201| 2015-07-09T13:56:43.247-0400 I COMMAND [repl writer worker 14] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.247-0400 m31202| 2015-07-09T13:56:43.247-0400 I COMMAND [repl writer worker 8] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.266-0400 m31200| 2015-07-09T13:56:43.264-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_104
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.277-0400 m31100| 2015-07-09T13:56:43.276-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.284-0400 m31202| 2015-07-09T13:56:43.283-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.290-0400 m31201| 2015-07-09T13:56:43.289-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464602_35
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.294-0400 m31200| 2015-07-09T13:56:43.294-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.295-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.295-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.295-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.296-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.296-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.299-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464602_34", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464602_34", timeMillis: 644, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|38, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464602_34", timeMillis: 654, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|31, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 21641, W: 166 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 8, W: 4 }, timeAcquiringMicros: { w: 43843, W: 37464 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 219ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.299-0400 m31100| 2015-07-09T13:56:43.294-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.299-0400 m31200| 2015-07-09T13:56:43.299-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.301-0400 m31102| 2015-07-09T13:56:43.301-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.303-0400 m31101| 2015-07-09T13:56:43.302-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.316-0400 m31202| 2015-07-09T13:56:43.316-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.322-0400 m31200| 2015-07-09T13:56:43.321-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.323-0400 m31201| 2015-07-09T13:56:43.323-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464602_34
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.333-0400 m31200| 2015-07-09T13:56:43.332-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_105
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.334-0400 m31200| 2015-07-09T13:56:43.333-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_105
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.335-0400 m31200| 2015-07-09T13:56:43.334-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_108
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.336-0400 m31202| 2015-07-09T13:56:43.335-0400 I COMMAND [repl writer worker 1] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.336-0400 m31201| 2015-07-09T13:56:43.336-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.339-0400 m31200| 2015-07-09T13:56:43.334-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_105
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.343-0400 m31100| 2015-07-09T13:56:43.343-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.360-0400 m31100| 2015-07-09T13:56:43.359-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464602_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.366-0400 m31200| 2015-07-09T13:56:43.359-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.366-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.366-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.367-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.367-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.367-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.370-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464602_38", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464602_38", timeMillis: 633, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|18, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464602_38", timeMillis: 720, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|51, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 30398, W: 47200 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 6, W: 4 }, timeAcquiringMicros: { w: 43787, W: 12883 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 246ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.371-0400 m31200| 2015-07-09T13:56:43.371-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464602_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.376-0400 m31101| 2015-07-09T13:56:43.375-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464602_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.380-0400 m31200| 2015-07-09T13:56:43.379-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_109
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.386-0400 m31202| 2015-07-09T13:56:43.386-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464602_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.387-0400 m31201| 2015-07-09T13:56:43.387-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464602_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.394-0400 m31102| 2015-07-09T13:56:43.394-0400 I COMMAND [repl writer worker 11] CMD: drop db17.tmp.mrs.coll17_1436464602_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.406-0400 m31100| 2015-07-09T13:56:43.406-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.432-0400 m31200| 2015-07-09T13:56:43.432-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_110
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.605-0400 m31200| 2015-07-09T13:56:43.604-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.612-0400 m31200| 2015-07-09T13:56:43.609-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_106
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.614-0400 m31200| 2015-07-09T13:56:43.609-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_106
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.615-0400 m31202| 2015-07-09T13:56:43.611-0400 I COMMAND [repl writer worker 13] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.615-0400 m31201| 2015-07-09T13:56:43.612-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.634-0400 m31100| 2015-07-09T13:56:43.630-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.639-0400 m31100| 2015-07-09T13:56:43.638-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.640-0400 m31100| 2015-07-09T13:56:43.639-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.640-0400 m31200| 2015-07-09T13:56:43.639-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_106
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.643-0400 m31100| 2015-07-09T13:56:43.642-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.645-0400 m31200| 2015-07-09T13:56:43.644-0400 I COMMAND [conn38] command db17.map_reduce_replace4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.645-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.645-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.645-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.645-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.645-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.647-0400 m31200| }, out: { replace: "map_reduce_replace4" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464602_36", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464602_36", timeMillis: 420, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|58, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464602_36", timeMillis: 497, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|94, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 57, w: 50, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 5241, w: 34153, W: 14656 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { r: 1, w: 21, W: 4 }, timeAcquiringMicros: { r: 5704, w: 248973, W: 55190 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 476ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.647-0400 m31100| 2015-07-09T13:56:43.645-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464602_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.648-0400 m31100| 2015-07-09T13:56:43.647-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464603_39 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.648-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.648-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.648-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.649-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_39", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 3304 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 22, R: 7, W: 3 }, timeAcquiringMicros: { w: 207630, R: 40927, W: 3155 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 434ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.649-0400 m31200| 2015-07-09T13:56:43.649-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464602_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.653-0400 m31202| 2015-07-09T13:56:43.653-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464602_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.654-0400 m31201| 2015-07-09T13:56:43.654-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464602_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.655-0400 m31102| 2015-07-09T13:56:43.654-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464602_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.663-0400 m31101| 2015-07-09T13:56:43.663-0400 I COMMAND [repl writer worker 3] CMD: drop db17.tmp.mrs.coll17_1436464602_36
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.673-0400 m31100| 2015-07-09T13:56:43.671-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.674-0400 m31200| 2015-07-09T13:56:43.674-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.719-0400 m31100| 2015-07-09T13:56:43.719-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.722-0400 m31100| 2015-07-09T13:56:43.722-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.723-0400 m31100| 2015-07-09T13:56:43.722-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.734-0400 m31100| 2015-07-09T13:56:43.734-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.742-0400 m31100| 2015-07-09T13:56:43.742-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464603_37 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.742-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.742-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.742-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.743-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_37", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 9231, W: 181 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 5, w: 20, R: 10, W: 5 }, timeAcquiringMicros: { r: 27067, w: 114948, R: 49790, W: 23767 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 466ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.770-0400 m31100| 2015-07-09T13:56:43.770-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.771-0400 m31200| 2015-07-09T13:56:43.771-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.774-0400 m31100| 2015-07-09T13:56:43.774-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.774-0400 m31100| 2015-07-09T13:56:43.774-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.776-0400 m31200| 2015-07-09T13:56:43.776-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_107
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.776-0400 m31100| 2015-07-09T13:56:43.776-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.777-0400 m31200| 2015-07-09T13:56:43.776-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_107
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.786-0400 m31200| 2015-07-09T13:56:43.786-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_107
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.786-0400 m31200| 2015-07-09T13:56:43.786-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.788-0400 m31100| 2015-07-09T13:56:43.787-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464603_38 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.788-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.789-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.789-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.790-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_38", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3922, w: 9415, W: 67 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 23, R: 11, W: 9 }, timeAcquiringMicros: { r: 12388, w: 117259, R: 48620, W: 41894 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 450ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.793-0400 m31200| 2015-07-09T13:56:43.792-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_108
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.793-0400 m31100| 2015-07-09T13:56:43.793-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464603_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.793-0400 m31200| 2015-07-09T13:56:43.793-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_108
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.793-0400 m31200| 2015-07-09T13:56:43.793-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_108
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.797-0400 m31200| 2015-07-09T13:56:43.796-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464603_39 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.797-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.797-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.797-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.798-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_39", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 3, W: 1 }, timeAcquiringMicros: { r: 18514, w: 32477, W: 5870 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 32, R: 9, W: 3 }, timeAcquiringMicros: { r: 1812, w: 271089, R: 42997, W: 8763 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 583ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.798-0400 m31200| 2015-07-09T13:56:43.798-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.799-0400 m31100| 2015-07-09T13:56:43.799-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.800-0400 m31100| 2015-07-09T13:56:43.799-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.801-0400 m31100| 2015-07-09T13:56:43.801-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.803-0400 m31200| 2015-07-09T13:56:43.802-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464603_37 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.804-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.804-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.804-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.805-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_37", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 5, W: 1 }, timeAcquiringMicros: { r: 23069, W: 8236 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 26, R: 13, W: 7 }, timeAcquiringMicros: { r: 272, w: 118746, R: 102238, W: 38795 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 527ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.806-0400 m31200| 2015-07-09T13:56:43.804-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.807-0400 m31100| 2015-07-09T13:56:43.806-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464603_40 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.807-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.808-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.808-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.808-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_40", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 3982, w: 4917 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 19, R: 12, W: 7 }, timeAcquiringMicros: { r: 2809, w: 127331, R: 29411, W: 48902 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 414ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.816-0400 m31100| 2015-07-09T13:56:43.815-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.821-0400 m31100| 2015-07-09T13:56:43.820-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.821-0400 m31100| 2015-07-09T13:56:43.821-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.822-0400 m31100| 2015-07-09T13:56:43.821-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.823-0400 m31100| 2015-07-09T13:56:43.822-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464603_39 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.823-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.823-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.823-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.824-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_39", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 152, w: 75, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 576 } }, Database: { acquireCount: { r: 25, w: 67, R: 12, W: 10 }, acquireWaitCount: { w: 3, R: 11, W: 4 }, timeAcquiringMicros: { w: 2791, R: 12353, W: 2132 } }, Collection: { acquireCount: { r: 25, w: 46 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.827-0400 m31102| 2015-07-09T13:56:43.827-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.829-0400 m31101| 2015-07-09T13:56:43.829-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.865-0400 m31200| 2015-07-09T13:56:43.865-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464603_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.871-0400 m31200| 2015-07-09T13:56:43.870-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_110
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.871-0400 m31200| 2015-07-09T13:56:43.870-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_110
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.873-0400 m31200| 2015-07-09T13:56:43.872-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_110
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.875-0400 m31200| 2015-07-09T13:56:43.875-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464603_40 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.876-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.876-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.876-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.877-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_40", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 23730, w: 9054, W: 1136 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 21, R: 11, W: 9 }, timeAcquiringMicros: { r: 24310, w: 113414, R: 40373, W: 44184 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 483ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.878-0400 m31200| 2015-07-09T13:56:43.877-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.903-0400 m31200| 2015-07-09T13:56:43.903-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.912-0400 m31200| 2015-07-09T13:56:43.910-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_109
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.912-0400 m31200| 2015-07-09T13:56:43.910-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_109
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.912-0400 m31200| 2015-07-09T13:56:43.912-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_109
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.918-0400 m31200| 2015-07-09T13:56:43.917-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464603_38 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.918-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.919-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.919-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.920-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_38", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 1, W: 1 }, timeAcquiringMicros: { r: 25288, w: 8681, W: 924 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 9, w: 30, R: 11, W: 9 }, timeAcquiringMicros: { r: 30864, w: 157372, R: 38766, W: 50987 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 581ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.920-0400 m31200| 2015-07-09T13:56:43.918-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.943-0400 m31200| 2015-07-09T13:56:43.942-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.961-0400 m31200| 2015-07-09T13:56:43.961-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.962-0400 m31200| 2015-07-09T13:56:43.961-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.962-0400 m31202| 2015-07-09T13:56:43.962-0400 I COMMAND [repl writer worker 5] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.965-0400 m31201| 2015-07-09T13:56:43.963-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.966-0400 m31200| 2015-07-09T13:56:43.964-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_111
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.966-0400 m31200| 2015-07-09T13:56:43.965-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.970-0400 m31200| 2015-07-09T13:56:43.970-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.971-0400 m31200| 2015-07-09T13:56:43.970-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.972-0400 m31200| 2015-07-09T13:56:43.972-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_112
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.972-0400 m31200| 2015-07-09T13:56:43.972-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.973-0400 m31200| 2015-07-09T13:56:43.972-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464603_39 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.973-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.973-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.973-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.975-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464603_39", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 150, w: 75, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 16829, w: 8591, W: 109 } }, Database: { acquireCount: { r: 25, w: 67, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 10, R: 11, W: 9 }, timeAcquiringMicros: { r: 2008, w: 41583, R: 47507, W: 20329 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 301ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.978-0400 m31202| 2015-07-09T13:56:43.977-0400 I COMMAND [repl writer worker 14] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.979-0400 m31200| 2015-07-09T13:56:43.978-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.979-0400 m31201| 2015-07-09T13:56:43.979-0400 I COMMAND [repl writer worker 2] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.983-0400 m31200| 2015-07-09T13:56:43.982-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.984-0400 m31200| 2015-07-09T13:56:43.983-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.985-0400 m31200| 2015-07-09T13:56:43.983-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_113
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.986-0400 m31200| 2015-07-09T13:56:43.983-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.987-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.987-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.987-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.987-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.987-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.990-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464603_39", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464603_39", timeMillis: 426, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|94, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464603_39", timeMillis: 563, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|233, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 44179, W: 1173 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 11, W: 3 }, timeAcquiringMicros: { w: 47435, W: 16397 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 186ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.990-0400 m31100| 2015-07-09T13:56:43.984-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.991-0400 m31200| 2015-07-09T13:56:43.986-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.991-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.991-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.991-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.991-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.992-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.993-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464603_37", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464603_37", timeMillis: 447, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|112, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464603_37", timeMillis: 517, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|236, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 38561, W: 293 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 13, W: 3 }, timeAcquiringMicros: { w: 48892, W: 31189 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 182ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.994-0400 m31200| 2015-07-09T13:56:43.988-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.994-0400 m31100| 2015-07-09T13:56:43.988-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.994-0400 m31102| 2015-07-09T13:56:43.988-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.994-0400 m31101| 2015-07-09T13:56:43.989-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.996-0400 m31200| 2015-07-09T13:56:43.991-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.997-0400 m31102| 2015-07-09T13:56:43.993-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.997-0400 m31202| 2015-07-09T13:56:43.995-0400 I COMMAND [repl writer worker 10] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:43.998-0400 m31201| 2015-07-09T13:56:43.998-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.017-0400 m31200| 2015-07-09T13:56:44.016-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.019-0400 m31100| 2015-07-09T13:56:44.019-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.023-0400 m31202| 2015-07-09T13:56:44.022-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.027-0400 m31201| 2015-07-09T13:56:44.027-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.030-0400 m31200| 2015-07-09T13:56:44.028-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.030-0400 m31200| 2015-07-09T13:56:44.029-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.031-0400 m31200| 2015-07-09T13:56:44.030-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_116
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.032-0400 m31202| 2015-07-09T13:56:44.032-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.034-0400 m31201| 2015-07-09T13:56:44.034-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.036-0400 m31202| 2015-07-09T13:56:44.035-0400 I COMMAND [repl writer worker 14] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.036-0400 m31100| 2015-07-09T13:56:44.036-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.037-0400 m31200| 2015-07-09T13:56:44.037-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.040-0400 m31200| 2015-07-09T13:56:44.037-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.040-0400 m31201| 2015-07-09T13:56:44.037-0400 I COMMAND [repl writer worker 2] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.041-0400 m31100| 2015-07-09T13:56:44.038-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_127
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.041-0400 m31200| 2015-07-09T13:56:44.039-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464603_39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.060-0400 m31101| 2015-07-09T13:56:44.060-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464603_37
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.110-0400 m31200| 2015-07-09T13:56:44.110-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.125-0400 m31100| 2015-07-09T13:56:44.125-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_128
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.143-0400 m31200| 2015-07-09T13:56:44.143-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.148-0400 m31200| 2015-07-09T13:56:44.148-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.149-0400 m31200| 2015-07-09T13:56:44.148-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.156-0400 m31200| 2015-07-09T13:56:44.155-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_114
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.166-0400 m31201| 2015-07-09T13:56:44.165-0400 I COMMAND [repl writer worker 13] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.167-0400 m31200| 2015-07-09T13:56:44.166-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.168-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.168-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.168-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.168-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.169-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.170-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464603_40", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464603_40", timeMillis: 407, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|152, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464603_40", timeMillis: 478, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|261, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 53972, W: 33403 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 14, W: 4 }, timeAcquiringMicros: { w: 116874, W: 16753 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 289ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.170-0400 m31100| 2015-07-09T13:56:44.168-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464603_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.172-0400 m31202| 2015-07-09T13:56:44.171-0400 I COMMAND [repl writer worker 11] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.189-0400 m31200| 2015-07-09T13:56:44.188-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464603_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.189-0400 m31101| 2015-07-09T13:56:44.189-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464603_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.190-0400 m31102| 2015-07-09T13:56:44.189-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464603_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.210-0400 m31201| 2015-07-09T13:56:44.209-0400 I COMMAND [repl writer worker 3] CMD: drop db17.tmp.mrs.coll17_1436464603_40
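Each mapreduce.shardedfinish document above embeds the per-shard results plus a shardCounts breakdown, and the top-level counts are simply the per-shard sums: 1019 + 981 = 2000 input/emit, 80 + 80 = 160 reduce, 20 + 20 = 40 output. A quick consistency check in the same shell dialect, assuming the response document has been captured (for example from db.runCommand) into a variable; the shard keys are abbreviated here, where the real documents use the full replica-set connection strings:

    // assert.eq is the jstests shell helper used throughout this suite.
    function checkShardCounts(shardCounts, totals) {
        var sums = { input: 0, emit: 0, reduce: 0, output: 0 };
        Object.keys(shardCounts).forEach(function(shard) {
            Object.keys(sums).forEach(function(k) { sums[k] += shardCounts[shard][k]; });
        });
        Object.keys(sums).forEach(function(k) {
            assert.eq(sums[k], totals[k], "per-shard " + k + " does not sum to the total");
        });
    }
    checkShardCounts({ "test-rs0": { input: 1019, emit: 1019, reduce: 80, output: 20 },
                       "test-rs1": { input: 981, emit: 981, reduce: 80, output: 20 } },
                     { emit: 2000, input: 2000, output: 40, reduce: 160 });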
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.211-0400 m31202| 2015-07-09T13:56:44.210-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464603_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.218-0400 m31200| 2015-07-09T13:56:44.217-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.223-0400 m31200| 2015-07-09T13:56:44.222-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.223-0400 m31200| 2015-07-09T13:56:44.223-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.223-0400 m31200| 2015-07-09T13:56:44.223-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_115
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.223-0400 m31201| 2015-07-09T13:56:44.223-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.224-0400 m31200| 2015-07-09T13:56:44.223-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.224-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.224-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.225-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.225-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.225-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.228-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464603_38", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464603_38", timeMillis: 438, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|139, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464603_38", timeMillis: 574, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464603000|296, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 61671, W: 19612 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 15, W: 2 }, timeAcquiringMicros: { w: 160607, W: 3014 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 304ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.229-0400 m31100| 2015-07-09T13:56:44.224-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.229-0400 m31202| 2015-07-09T13:56:44.225-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.229-0400 m31100| 2015-07-09T13:56:44.226-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_129
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.251-0400 m31200| 2015-07-09T13:56:44.250-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.253-0400 m31101| 2015-07-09T13:56:44.252-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.254-0400 m31102| 2015-07-09T13:56:44.253-0400 I COMMAND [repl writer worker 11] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.255-0400 m31200| 2015-07-09T13:56:44.255-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_120
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.274-0400 m31201| 2015-07-09T13:56:44.273-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.277-0400 m31202| 2015-07-09T13:56:44.276-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464603_38
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.294-0400 m31100| 2015-07-09T13:56:44.294-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_130
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.304-0400 m31200| 2015-07-09T13:56:44.304-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.487-0400 m31100| 2015-07-09T13:56:44.486-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.491-0400 m31100| 2015-07-09T13:56:44.491-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.492-0400 m31100| 2015-07-09T13:56:44.491-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.492-0400 m31100| 2015-07-09T13:56:44.491-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.504-0400 m31100| 2015-07-09T13:56:44.503-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464604_41 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.506-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.506-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.506-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.507-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_41", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 852 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 22, R: 12, W: 4 }, timeAcquiringMicros: { w: 183165, R: 64527, W: 11598 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 484ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.507-0400 m31100| 2015-07-09T13:56:44.503-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.509-0400 m31100| 2015-07-09T13:56:44.509-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_127
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.509-0400 m31100| 2015-07-09T13:56:44.509-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_127
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.510-0400 m31100| 2015-07-09T13:56:44.509-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_127
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.511-0400 m31100| 2015-07-09T13:56:44.510-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464604_40 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.511-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.511-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.512-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.513-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_40", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5341, W: 2235 } }, Database: { acquireCount: { r: 25, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 3, w: 19, R: 13, W: 7 }, timeAcquiringMicros: { r: 6942, w: 196765, R: 56932, W: 13395 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 479ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.517-0400 m31100| 2015-07-09T13:56:44.517-0400 I COMMAND [conn72] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.527-0400 m31100| 2015-07-09T13:56:44.526-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_128
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.527-0400 m31100| 2015-07-09T13:56:44.527-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_128
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.528-0400 m31100| 2015-07-09T13:56:44.527-0400 I COMMAND [conn72] CMD: drop db17.tmp.mr.coll17_128
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.529-0400 m31101| 2015-07-09T13:56:44.529-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.530-0400 m31102| 2015-07-09T13:56:44.530-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.531-0400 m31100| 2015-07-09T13:56:44.530-0400 I COMMAND [conn72] command db17.tmp.mrs.coll17_1436464604_41 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.531-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.531-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.531-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.531-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_41", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 156, w: 75, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7231, w: 6084, W: 286 } }, Database: { acquireCount: { r: 25, w: 67, R: 14, W: 11 }, acquireWaitCount: { r: 4, w: 15, R: 13, W: 9 }, timeAcquiringMicros: { r: 15003, w: 76485, R: 82335, W: 42761 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 421ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.540-0400 m31100| 2015-07-09T13:56:44.540-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.549-0400 m31100| 2015-07-09T13:56:44.548-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_129
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.549-0400 m31100| 2015-07-09T13:56:44.549-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_129
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.551-0400 m31100| 2015-07-09T13:56:44.551-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_129
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.556-0400 m31100| 2015-07-09T13:56:44.553-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464604_42 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.556-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.556-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.556-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.558-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_42", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 10323, w: 15564, W: 31 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 8, R: 12, W: 7 }, timeAcquiringMicros: { r: 9603, w: 33296, R: 46872, W: 35988 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 340ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.559-0400 m31100| 2015-07-09T13:56:44.557-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.565-0400 m31100| 2015-07-09T13:56:44.564-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_130
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.565-0400 m31100| 2015-07-09T13:56:44.565-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_130
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.571-0400 m31100| 2015-07-09T13:56:44.570-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_130
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.571-0400 m31100| 2015-07-09T13:56:44.571-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464604_42 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.572-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.572-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.572-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.573-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_42", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 152, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 12300, w: 14575 } }, Database: { acquireCount: { r: 25, w: 67, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 6, R: 12, W: 5 }, timeAcquiringMicros: { r: 9374, w: 16908, R: 14836, W: 17453 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 289ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.573-0400 m31101| 2015-07-09T13:56:44.571-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.573-0400 m31102| 2015-07-09T13:56:44.572-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.585-0400 m31200| 2015-07-09T13:56:44.585-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.593-0400 m31200| 2015-07-09T13:56:44.592-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.593-0400 m31200| 2015-07-09T13:56:44.592-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.594-0400 m31200| 2015-07-09T13:56:44.594-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.596-0400 m31200| 2015-07-09T13:56:44.596-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464604_41 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.596-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.597-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.597-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.598-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_41", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 5, W: 1 }, timeAcquiringMicros: { r: 43920, W: 4121 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 27, R: 11, W: 6 }, timeAcquiringMicros: { r: 1204, w: 259213, R: 79955, W: 3038 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 577ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.598-0400 m31200| 2015-07-09T13:56:44.596-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.598-0400 m31200| 2015-07-09T13:56:44.597-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.603-0400 m31200| 2015-07-09T13:56:44.603-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.604-0400 m31200| 2015-07-09T13:56:44.603-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.605-0400 m31200| 2015-07-09T13:56:44.605-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_118
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.627-0400 m31200| 2015-07-09T13:56:44.627-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464604_40 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.628-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.628-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.628-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.629-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_40", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 2, W: 1 }, timeAcquiringMicros: { r: 15375, w: 10983, W: 730 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 26, R: 10, W: 8 }, timeAcquiringMicros: { r: 680, w: 221871, R: 91033, W: 66699 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 596ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.629-0400 m31200| 2015-07-09T13:56:44.628-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.648-0400 m31200| 2015-07-09T13:56:44.648-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.659-0400 m31200| 2015-07-09T13:56:44.658-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.660-0400 m31200| 2015-07-09T13:56:44.659-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.662-0400 m31200| 2015-07-09T13:56:44.661-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.666-0400 m31200| 2015-07-09T13:56:44.666-0400 I COMMAND [conn38] command db17.tmp.mrs.coll17_1436464604_41 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.666-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.666-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.667-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.668-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_41", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 152, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 4, W: 1 }, timeAcquiringMicros: { r: 10130, w: 53062, W: 1458 } }, Database: { acquireCount: { r: 25, w: 67, R: 12, W: 11 }, acquireWaitCount: { r: 8, w: 22, R: 12, W: 9 }, timeAcquiringMicros: { r: 35573, w: 87849, R: 85078, W: 36763 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 558ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.669-0400 m31200| 2015-07-09T13:56:44.667-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.672-0400 m31202| 2015-07-09T13:56:44.671-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.693-0400 m31200| 2015-07-09T13:56:44.693-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.700-0400 m31200| 2015-07-09T13:56:44.699-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_120
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.701-0400 m31200| 2015-07-09T13:56:44.699-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_120
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.701-0400 m31200| 2015-07-09T13:56:44.700-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_120
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.703-0400 m31201| 2015-07-09T13:56:44.702-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.704-0400 m31200| 2015-07-09T13:56:44.704-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464604_42 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.704-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.704-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.705-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.705-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_42", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 3, W: 1 }, timeAcquiringMicros: { r: 17134, w: 20913, W: 318 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 10, w: 21, R: 12, W: 7 }, timeAcquiringMicros: { r: 20438, w: 71482, R: 63367, W: 70888 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 491ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.706-0400 m31200| 2015-07-09T13:56:44.706-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.732-0400 m31200| 2015-07-09T13:56:44.732-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.740-0400 m31200| 2015-07-09T13:56:44.739-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.741-0400 m31200| 2015-07-09T13:56:44.740-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.746-0400 m31200| 2015-07-09T13:56:44.745-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_121
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.749-0400 m31200| 2015-07-09T13:56:44.748-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464604_42 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.750-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.750-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.750-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.751-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_42", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 152, w: 75, W: 3 }, acquireWaitCount: { r: 1, w: 4, W: 1 }, timeAcquiringMicros: { r: 6773, w: 22013, W: 1324 } }, Database: { acquireCount: { r: 25, w: 67, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 21, R: 12, W: 9 }, timeAcquiringMicros: { r: 25920, w: 87915, R: 46822, W: 67009 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 467ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.752-0400 m31200| 2015-07-09T13:56:44.749-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.752-0400 m31200| 2015-07-09T13:56:44.749-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.755-0400 m31200| 2015-07-09T13:56:44.754-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.756-0400 m31200| 2015-07-09T13:56:44.755-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.756-0400 m31202| 2015-07-09T13:56:44.755-0400 I COMMAND [repl writer worker 3] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.758-0400 m31200| 2015-07-09T13:56:44.757-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.761-0400 m31200| 2015-07-09T13:56:44.758-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.761-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.761-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.761-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.762-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.762-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.765-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464604_41", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464604_41", timeMillis: 472, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|51, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464604_41", timeMillis: 574, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|77, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 32922, W: 1444 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 15, W: 4 }, timeAcquiringMicros: { w: 50113, W: 6058 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 161ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.765-0400 m31100| 2015-07-09T13:56:44.760-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.765-0400 m31200| 2015-07-09T13:56:44.763-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.766-0400 m31101| 2015-07-09T13:56:44.764-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.767-0400 m31102| 2015-07-09T13:56:44.767-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.773-0400 m31202| 2015-07-09T13:56:44.772-0400 I COMMAND [repl writer worker 9] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.774-0400 m31201| 2015-07-09T13:56:44.773-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.787-0400 m31201| 2015-07-09T13:56:44.787-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.788-0400 m31100| 2015-07-09T13:56:44.788-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_131
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.789-0400 m31200| 2015-07-09T13:56:44.788-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.805-0400 m31202| 2015-07-09T13:56:44.804-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.805-0400 m31200| 2015-07-09T13:56:44.805-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.806-0400 m31200| 2015-07-09T13:56:44.805-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.806-0400 m31200| 2015-07-09T13:56:44.805-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_123
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.806-0400 m31200| 2015-07-09T13:56:44.806-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.807-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.807-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.807-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.807-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.807-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.809-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464604_40", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464604_40", timeMillis: 478, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|55, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464604_40", timeMillis: 572, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|87, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 28976, W: 1860 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 61333, W: 2316 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 177ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.809-0400 m31100| 2015-07-09T13:56:44.807-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.809-0400 m31200| 2015-07-09T13:56:44.807-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_127
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.816-0400 m31200| 2015-07-09T13:56:44.815-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.816-0400 m31201| 2015-07-09T13:56:44.816-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.817-0400 m31202| 2015-07-09T13:56:44.816-0400 I COMMAND [repl writer worker 13] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.818-0400 m31102| 2015-07-09T13:56:44.817-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.818-0400 m31101| 2015-07-09T13:56:44.818-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.822-0400 m31201| 2015-07-09T13:56:44.821-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.832-0400 m31200| 2015-07-09T13:56:44.832-0400 I COMMAND [conn38] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.847-0400 m31200| 2015-07-09T13:56:44.846-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.847-0400 m31200| 2015-07-09T13:56:44.846-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.848-0400 m31200| 2015-07-09T13:56:44.847-0400 I COMMAND [conn38] CMD: drop db17.tmp.mr.coll17_124
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.856-0400 m31201| 2015-07-09T13:56:44.855-0400 I COMMAND [repl writer worker 12] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.856-0400 m31202| 2015-07-09T13:56:44.856-0400 I COMMAND [repl writer worker 12] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.863-0400 m31201| 2015-07-09T13:56:44.862-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.865-0400 m31200| 2015-07-09T13:56:44.864-0400 I COMMAND [conn38] command db17.map_reduce_replace4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.865-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.865-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.865-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.865-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.865-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.867-0400 m31200| }, out: { replace: "map_reduce_replace4" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464604_41", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464604_41", timeMillis: 418, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|71, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464604_41", timeMillis: 552, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|113, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 36744, W: 18757 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 10, W: 4 }, timeAcquiringMicros: { w: 49169, W: 20209 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 197ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.868-0400 m31100| 2015-07-09T13:56:44.866-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.868-0400 m31202| 2015-07-09T13:56:44.867-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464604_40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.868-0400 m31200| 2015-07-09T13:56:44.868-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464604_41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.870-0400 m31100| 2015-07-09T13:56:44.869-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_132
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.875-0400 m31200| 2015-07-09T13:56:44.875-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_128
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.888-0400 m30998| 2015-07-09T13:56:44.888-0400 I NETWORK [conn110] end connection 127.0.0.1:62955 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.968-0400 m31200| 2015-07-09T13:56:44.967-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.983-0400 m31100| 2015-07-09T13:56:44.983-0400 I COMMAND [conn60] CMD: drop db17.tmp.mrs.coll17_1436464604_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.984-0400 m31200| 2015-07-09T13:56:44.984-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.985-0400 m31200| 2015-07-09T13:56:44.984-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.986-0400 m31200| 2015-07-09T13:56:44.984-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.986-0400 m31200| 2015-07-09T13:56:44.984-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.987-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.987-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.987-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.987-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.987-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.989-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464604_42", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464604_42", timeMillis: 336, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|102, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464604_42", timeMillis: 486, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|152, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 65470, W: 17214 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 17, W: 2 }, timeAcquiringMicros: { w: 117699, W: 1583 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 278ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.989-0400 m31100| 2015-07-09T13:56:44.985-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.990-0400 m31202| 2015-07-09T13:56:44.990-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.991-0400 m31100| 2015-07-09T13:56:44.990-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_131
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:44.991-0400 m31100| 2015-07-09T13:56:44.991-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_131
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.005-0400 m31200| 2015-07-09T13:56:45.004-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.011-0400 m31201| 2015-07-09T13:56:45.010-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.019-0400 m31100| 2015-07-09T13:56:45.018-0400 I COMMAND [conn60] CMD: drop db17.tmp.mr.coll17_131
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.019-0400 m31202| 2015-07-09T13:56:45.019-0400 I COMMAND [repl writer worker 15] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.033-0400 m31201| 2015-07-09T13:56:45.032-0400 I COMMAND [repl writer worker 1] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.033-0400 m31100| 2015-07-09T13:56:45.033-0400 I COMMAND [conn60] command db17.tmp.mrs.coll17_1436464604_43 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.034-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.034-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.034-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.035-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_43", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 354 } }, Database: { acquireCount: { r: 25, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 5, R: 4, W: 4 }, timeAcquiringMicros: { w: 22556, R: 17448, W: 34038 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 245ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.035-0400 m31100| 2015-07-09T13:56:45.033-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_133
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.036-0400 m31200| 2015-07-09T13:56:45.034-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_129
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.037-0400 m31200| 2015-07-09T13:56:45.037-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.044-0400 m31200| 2015-07-09T13:56:45.044-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.045-0400 m31200| 2015-07-09T13:56:45.044-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.045-0400 m31200| 2015-07-09T13:56:45.044-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_126
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.046-0400 m31201| 2015-07-09T13:56:45.045-0400 I COMMAND [repl writer worker 2] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.046-0400 m31200| 2015-07-09T13:56:45.045-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.046-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.046-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.047-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.047-0400 m31200| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.047-0400 m31200| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.048-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464604_42", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464604_42", timeMillis: 283, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|114, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464604_42", timeMillis: 458, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464604000|184, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 82639, W: 1262 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 19, W: 2 }, timeAcquiringMicros: { w: 120754, W: 3911 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 295ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.048-0400 m31100| 2015-07-09T13:56:45.047-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.048-0400 m31202| 2015-07-09T13:56:45.048-0400 I COMMAND [repl writer worker 14] CMD: drop db17.map_reduce_replace0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.059-0400 m31200| 2015-07-09T13:56:45.058-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.073-0400 m31102| 2015-07-09T13:56:45.073-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.074-0400 m31101| 2015-07-09T13:56:45.073-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464604_42
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.075-0400 m31100| 2015-07-09T13:56:45.075-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_134
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.080-0400 m31200| 2015-07-09T13:56:45.079-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_130
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.217-0400 m31100| 2015-07-09T13:56:45.216-0400 I COMMAND [conn48] CMD: drop db17.tmp.mrs.coll17_1436464604_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.226-0400 m31100| 2015-07-09T13:56:45.225-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_132
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.226-0400 m31100| 2015-07-09T13:56:45.225-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_132
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.234-0400 m31100| 2015-07-09T13:56:45.227-0400 I COMMAND [conn48] CMD: drop db17.tmp.mr.coll17_132
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.484-0400 m31101| 2015-07-09T13:56:45.233-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464604_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.484-0400 m31102| 2015-07-09T13:56:45.233-0400 I COMMAND [repl writer worker 5] CMD: drop db17.tmp.mrs.coll17_1436464604_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.484-0400 m31100| 2015-07-09T13:56:45.240-0400 I COMMAND [conn48] command db17.tmp.mrs.coll17_1436464604_43 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.484-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.485-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.485-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.486-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_43", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 152, w: 75, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 7553 } }, Database: { acquireCount: { r: 25, w: 67, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 19, R: 7, W: 3 }, timeAcquiringMicros: { r: 3184, w: 150464, R: 39323, W: 16934 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 374ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.486-0400 m31100| 2015-07-09T13:56:45.266-0400 I COMMAND [conn56] CMD: drop db17.tmp.mrs.coll17_1436464605_44
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.487-0400 m31100| 2015-07-09T13:56:45.279-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_133
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.487-0400 m31100| 2015-07-09T13:56:45.280-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_133
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.488-0400 m31100| 2015-07-09T13:56:45.282-0400 I COMMAND [conn56] CMD: drop db17.tmp.mr.coll17_133
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.488-0400 m31100| 2015-07-09T13:56:45.288-0400 I COMMAND [conn56] command db17.tmp.mrs.coll17_1436464605_44 command: mapReduce { mapreduce: "coll17", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.488-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.488-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.488-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.489-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464605_44", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 3662 } }, Database: { acquireCount: { r: 25, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 6, R: 13, W: 4 }, timeAcquiringMicros: { r: 3712, w: 33561, R: 33718, W: 7670 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 263ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.489-0400 m31200| 2015-07-09T13:56:45.294-0400 I COMMAND [conn37] CMD: drop db17.tmp.mrs.coll17_1436464604_43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.489-0400 m31200| 2015-07-09T13:56:45.301-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_128
[js_test:fsm_all_sharded_replication]
2015-07-09T13:56:45.490-0400 m31200| 2015-07-09T13:56:45.301-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_128 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.490-0400 m31200| 2015-07-09T13:56:45.305-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_128 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.490-0400 m31200| 2015-07-09T13:56:45.306-0400 I COMMAND [conn39] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.490-0400 m31100| 2015-07-09T13:56:45.317-0400 I COMMAND [conn33] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.490-0400 m31200| 2015-07-09T13:56:45.324-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_127 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.490-0400 m31200| 2015-07-09T13:56:45.324-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_127 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.491-0400 m31200| 2015-07-09T13:56:45.326-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_127 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.491-0400 m31100| 2015-07-09T13:56:45.326-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_134 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.491-0400 m31202| 2015-07-09T13:56:45.326-0400 I COMMAND [repl writer worker 13] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.492-0400 m31100| 2015-07-09T13:56:45.326-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_134 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.492-0400 m31201| 2015-07-09T13:56:45.327-0400 I COMMAND [repl writer worker 10] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.492-0400 m31200| 2015-07-09T13:56:45.328-0400 I COMMAND [conn39] command db17.tmp.mrs.coll17_1436464604_43 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.492-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.492-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.493-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.493-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_43", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 152, w: 75, W: 3 }, acquireWaitCount: { r: 4, w: 2, W: 1 }, timeAcquiringMicros: { r: 44340, w: 21017, W: 3240 } }, Database: { acquireCount: { r: 25, w: 67, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 23, R: 10, W: 8 }, timeAcquiringMicros: { r: 156, w: 203865, R: 41294, W: 14147 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 540ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.493-0400 m31200| 2015-07-09T13:56:45.328-0400 I COMMAND [conn37] command db17.tmp.mrs.coll17_1436464604_43 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.493-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:45.494-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.494-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.495-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464604_43", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 27716, w: 19976, W: 1304 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 19, R: 8, W: 9 }, timeAcquiringMicros: { w: 145308, R: 33768, W: 32641 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 462ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.495-0400 m31200| 2015-07-09T13:56:45.329-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_131 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.495-0400 m31200| 2015-07-09T13:56:45.330-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_132 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.495-0400 m31101| 2015-07-09T13:56:45.331-0400 I COMMAND [repl writer worker 12] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.495-0400 m31102| 2015-07-09T13:56:45.332-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.496-0400 m31100| 2015-07-09T13:56:45.333-0400 I COMMAND [conn33] CMD: drop db17.tmp.mr.coll17_134 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.496-0400 m31100| 2015-07-09T13:56:45.333-0400 I COMMAND [conn33] command db17.tmp.mrs.coll17_1436464605_44 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.496-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.496-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.497-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.498-0400 m31100| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464605_44", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 152, w: 75, W: 3 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 7297, w: 6445 } }, Database: { acquireCount: { r: 25, w: 67, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 6, R: 12, W: 5 }, timeAcquiringMicros: { r: 1399, w: 19930, R: 20890, W: 21010 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 260ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.498-0400 m31200| 2015-07-09T13:56:45.386-0400 I COMMAND [conn29] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.498-0400 m31200| 2015-07-09T13:56:45.397-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_130 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.499-0400 m31200| 2015-07-09T13:56:45.398-0400 I COMMAND [conn29] CMD: drop 
db17.tmp.mr.coll17_130 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.499-0400 m31200| 2015-07-09T13:56:45.399-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_130 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.499-0400 m31200| 2015-07-09T13:56:45.400-0400 I COMMAND [conn41] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.499-0400 m31200| 2015-07-09T13:56:45.400-0400 I COMMAND [conn29] command db17.tmp.mrs.coll17_1436464605_44 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.499-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.499-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.500-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.500-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464605_44", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 149, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 29628, W: 1561 } }, Database: { acquireCount: { r: 25, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 6, R: 11, W: 9 }, timeAcquiringMicros: { r: 16413, w: 34219, R: 37126, W: 35778 } }, Collection: { acquireCount: { r: 25, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 327ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.501-0400 m31200| 2015-07-09T13:56:45.401-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_133 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.501-0400 m31200| 2015-07-09T13:56:45.411-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_129 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.501-0400 m31200| 2015-07-09T13:56:45.411-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_129 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.501-0400 m31200| 2015-07-09T13:56:45.413-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_129 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.502-0400 m31200| 2015-07-09T13:56:45.414-0400 I COMMAND [conn41] command db17.tmp.mrs.coll17_1436464605_44 command: mapReduce { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.502-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.502-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.503-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.503-0400 m31200| values...., query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 }, out: "tmp.mrs.coll17_1436464605_44", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 150, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 34972, w: 15868 } }, Database: { acquireCount: { r: 25, w: 67, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 12, R: 11, W: 7 }, timeAcquiringMicros: { r: 38657, w: 35546, R: 40302, W: 9202 } }, Collection: { acquireCount: { r: 25, w: 47 } }, 
Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 391ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.504-0400 m31200| 2015-07-09T13:56:45.415-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_134 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.504-0400 m31201| 2015-07-09T13:56:45.417-0400 I COMMAND [repl writer worker 9] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.504-0400 m31202| 2015-07-09T13:56:45.419-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.504-0400 m31200| 2015-07-09T13:56:45.477-0400 I COMMAND [conn37] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.504-0400 m31200| 2015-07-09T13:56:45.482-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_131 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.504-0400 m31200| 2015-07-09T13:56:45.482-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_131 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.505-0400 m31200| 2015-07-09T13:56:45.483-0400 I COMMAND [conn37] CMD: drop db17.tmp.mr.coll17_131 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.505-0400 m31200| 2015-07-09T13:56:45.484-0400 I COMMAND [conn37] command db17.map_reduce_replace2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.505-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.505-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.505-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.505-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.505-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.506-0400 m31200| }, out: { replace: "map_reduce_replace2" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464604_43", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464604_43", timeMillis: 359, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464605000|24, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464604_43", timeMillis: 435, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464605000|50, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 22446, W: 1050 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, 
acquireWaitCount: { w: 4, W: 4 }, timeAcquiringMicros: { w: 49352, W: 2655 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.506-0400 m31200| 2015-07-09T13:56:45.485-0400 I COMMAND [conn39] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.506-0400 m31100| 2015-07-09T13:56:45.485-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31200| 2015-07-09T13:56:45.486-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31101| 2015-07-09T13:56:45.486-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31201| 2015-07-09T13:56:45.488-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31102| 2015-07-09T13:56:45.489-0400 I COMMAND [repl writer worker 6] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31202| 2015-07-09T13:56:45.489-0400 I COMMAND [repl writer worker 14] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31200| 2015-07-09T13:56:45.494-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_132 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31200| 2015-07-09T13:56:45.494-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_132 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.507-0400 m31201| 2015-07-09T13:56:45.498-0400 I COMMAND [repl writer worker 4] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.508-0400 m30998| 2015-07-09T13:56:45.504-0400 I NETWORK [conn111] end connection 127.0.0.1:62956 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.510-0400 m31200| 2015-07-09T13:56:45.510-0400 I COMMAND [conn39] CMD: drop db17.tmp.mr.coll17_132 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.511-0400 m31200| 2015-07-09T13:56:45.511-0400 I COMMAND [conn39] command db17.map_reduce_replace1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.512-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.512-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.512-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.512-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.512-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.514-0400 m31200| }, out: { replace: "map_reduce_replace1" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464604_43", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464604_43", timeMillis: 203, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, 
$gleStats: { lastOpTime: Timestamp 1436464604000|139, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464604_43", timeMillis: 536, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464605000|54, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 30725, W: 523 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 5, W: 4 }, timeAcquiringMicros: { w: 36016, W: 39811 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 180ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.514-0400 m31100| 2015-07-09T13:56:45.511-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.515-0400 m31201| 2015-07-09T13:56:45.512-0400 I COMMAND [repl writer worker 7] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.515-0400 m31200| 2015-07-09T13:56:45.513-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.515-0400 m31202| 2015-07-09T13:56:45.515-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.519-0400 m30999| 2015-07-09T13:56:45.518-0400 I NETWORK [conn111] end connection 127.0.0.1:62954 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.525-0400 m31200| 2015-07-09T13:56:45.525-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.527-0400 m31202| 2015-07-09T13:56:45.527-0400 I COMMAND [repl writer worker 2] CMD: drop db17.tmp.mrs.coll17_1436464604_43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.534-0400 m31200| 2015-07-09T13:56:45.533-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_134 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.534-0400 m31200| 2015-07-09T13:56:45.533-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_134 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.535-0400 m31201| 2015-07-09T13:56:45.534-0400 I COMMAND [repl writer worker 7] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.537-0400 m31202| 2015-07-09T13:56:45.536-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.537-0400 m31200| 2015-07-09T13:56:45.537-0400 I COMMAND [conn41] CMD: drop db17.tmp.mr.coll17_134 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.537-0400 m31200| 2015-07-09T13:56:45.537-0400 I COMMAND [conn41] command db17.map_reduce_replace3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:45.538-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.538-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.538-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.538-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.538-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.541-0400 m31200| }, out: { replace: "map_reduce_replace3" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464605_44", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464605_44", timeMillis: 255, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464605000|45, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464605_44", timeMillis: 388, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464605000|115, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 14862 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 4, W: 2 }, timeAcquiringMicros: { w: 14195, W: 22631 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.542-0400 m31100| 2015-07-09T13:56:45.538-0400 I COMMAND [conn38] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.542-0400 m31200| 2015-07-09T13:56:45.539-0400 I COMMAND [conn29] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.543-0400 m31200| 2015-07-09T13:56:45.542-0400 I COMMAND [conn63] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.544-0400 m31101| 2015-07-09T13:56:45.544-0400 I COMMAND [repl writer worker 14] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.546-0400 m31102| 2015-07-09T13:56:45.545-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.551-0400 m31200| 2015-07-09T13:56:45.550-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_133 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.551-0400 m31200| 2015-07-09T13:56:45.550-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_133 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.552-0400 m31202| 2015-07-09T13:56:45.552-0400 I COMMAND [repl writer worker 5] CMD: drop db17.map_reduce_replace0 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.552-0400 m31201| 2015-07-09T13:56:45.552-0400 I COMMAND [repl writer worker 3] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.553-0400 m31200| 2015-07-09T13:56:45.553-0400 I COMMAND [conn29] CMD: drop db17.tmp.mr.coll17_133 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.554-0400 m31200| 2015-07-09T13:56:45.553-0400 I COMMAND [conn29] command db17.map_reduce_replace0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll17", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.554-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.554-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.555-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.555-0400 m31200| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.555-0400 m31200| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.556-0400 m31200| }, out: { replace: "map_reduce_replace0" }, query: { key: { $exists: true }, value: { $exists: true } }, sort: { _id: -1.0 } }, inputDB: "db17", shardedOutputCollection: "tmp.mrs.coll17_1436464605_44", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll17_1436464605_44", timeMillis: 253, counts: { input: 1019, emit: 1019, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464605000|67, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll17_1436464605_44", timeMillis: 325, counts: { input: 981, emit: 981, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436464605000|108, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1019, emit: 1019, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 981, emit: 981, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:223 locks:{ Global: { acquireCount: { r: 55, w: 50, W: 3 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 31205 } }, Database: { acquireCount: { r: 1, w: 45, W: 6 }, acquireWaitCount: { w: 6, W: 3 }, timeAcquiringMicros: { w: 28378, W: 3702 } }, Collection: { acquireCount: { r: 1, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.556-0400 m31100| 2015-07-09T13:56:45.554-0400 I COMMAND [conn36] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.557-0400 m31200| 2015-07-09T13:56:45.554-0400 I COMMAND [conn62] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.558-0400 m30999| 2015-07-09T13:56:45.557-0400 I NETWORK [conn110] end connection 127.0.0.1:62953 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.558-0400 m31202| 2015-07-09T13:56:45.558-0400 I COMMAND [repl writer worker 4] CMD: drop db17.tmp.mrs.coll17_1436464605_44 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.560-0400 m30998| 2015-07-09T13:56:45.560-0400 I NETWORK [conn109] end connection 127.0.0.1:62952 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.562-0400 m31201| 2015-07-09T13:56:45.562-0400 I COMMAND [repl writer worker 0] CMD: drop db17.tmp.mrs.coll17_1436464605_44 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.583-0400 m30999| 2015-07-09T13:56:45.583-0400 I COMMAND [conn1] DROP: db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.583-0400 m30999| 2015-07-09T13:56:45.583-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.583-0400 m31200| 2015-07-09T13:56:45.583-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.587-0400 m30999| 2015-07-09T13:56:45.586-0400 I COMMAND [conn1] DROP: db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.587-0400 m30999| 2015-07-09T13:56:45.587-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.587-0400 m31200| 2015-07-09T13:56:45.587-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.587-0400 m31202| 2015-07-09T13:56:45.587-0400 I COMMAND [repl writer worker 10] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.588-0400 m31201| 2015-07-09T13:56:45.588-0400 I COMMAND [repl writer worker 15] CMD: drop db17.map_reduce_replace0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.588-0400 m30999| 2015-07-09T13:56:45.588-0400 I COMMAND [conn1] DROP: db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.589-0400 m30999| 2015-07-09T13:56:45.588-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.589-0400 m31200| 2015-07-09T13:56:45.588-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.590-0400 m31202| 2015-07-09T13:56:45.589-0400 I COMMAND [repl writer worker 9] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.590-0400 m31201| 2015-07-09T13:56:45.590-0400 I COMMAND [repl writer worker 1] CMD: drop db17.map_reduce_replace1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.591-0400 m30999| 2015-07-09T13:56:45.590-0400 I COMMAND [conn1] DROP: db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.591-0400 m30999| 2015-07-09T13:56:45.590-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.591-0400 m31200| 2015-07-09T13:56:45.590-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.591-0400 m31201| 2015-07-09T13:56:45.591-0400 I COMMAND [repl writer worker 6] CMD: drop db17.map_reduce_replace2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.591-0400 m30999| 2015-07-09T13:56:45.591-0400 I COMMAND [conn1] DROP: db17.map_reduce_replace4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.592-0400 m30999| 2015-07-09T13:56:45.591-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.592-0400 m31200| 2015-07-09T13:56:45.591-0400 I COMMAND [conn41] CMD: drop db17.map_reduce_replace4 
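(Editor's note: the entries above are the tail of the map_reduce_replace workload. Each round trip follows the same pattern: both shards run the mapReduce with shardedFirstPass: true and write partial results into a temporary tmp.mrs.coll17_<timestamp>_<n> collection; one shard (m31200 here) then runs the internal mapreduce.shardedfinish command to merge the per-shard partials into the final out: { replace: "map_reduce_replaceN" } collection; finally the temporaries are dropped on both shards and the drops replicate to the secondaries m31101/m31102 and m31201/m31202. A minimal shell sketch of the kind of call that produces this sequence follows; the map and reduce bodies are illustrative stand-ins, since the log truncates the workload's real functions.)

// Sketch only: function bodies are hypothetical, the log elides the real ones.
var mapper = function() {
    // The log shows the guard "if (this.hasOwnProperty('key') && this.has...".
    if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
        emit(this.key, 1); // emitted value is a stand-in; elided in the log
    }
};
var reducer = function(key, values) {
    // The log shows only "var res = {}; values...."; a sum is a stand-in.
    return Array.sum(values);
};
var finalizer = function(key, reducedValue) {
    return reducedValue; // matches the finalize function shown in the log
};
db.getSiblingDB("db17").coll17.mapReduce(mapper, reducer, {
    finalize: finalizer,
    out: { replace: "map_reduce_replace0" },
    query: { key: { $exists: true }, value: { $exists: true } },
    sort: { _id: -1.0 }
});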
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.592-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.592-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.593-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.593-0400 jstests/concurrency/fsm_workloads/map_reduce_replace.js: Workload completed in 9910 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.593-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.593-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.593-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.593-0400 m31202| 2015-07-09T13:56:45.592-0400 I COMMAND [repl writer worker 13] CMD: drop db17.map_reduce_replace2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.593-0400 m30999| 2015-07-09T13:56:45.592-0400 I COMMAND [conn1] DROP: db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.594-0400 m30999| 2015-07-09T13:56:45.593-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:45.593-0400-559eb5ddca4787b9985d1c3b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464605593), what: "dropCollection.start", ns: "db17.coll17", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.594-0400 m31202| 2015-07-09T13:56:45.593-0400 I COMMAND [repl writer worker 14] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.594-0400 m31201| 2015-07-09T13:56:45.593-0400 I COMMAND [repl writer worker 13] CMD: drop db17.map_reduce_replace3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.595-0400 m31201| 2015-07-09T13:56:45.595-0400 I COMMAND [repl writer worker 11] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.595-0400 m31202| 2015-07-09T13:56:45.595-0400 I COMMAND [repl writer worker 8] CMD: drop db17.map_reduce_replace4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.649-0400 m30999| 2015-07-09T13:56:45.648-0400 I SHARDING [conn1] distributed lock 'db17.coll17/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5ddca4787b9985d1c3c
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.649-0400 m31100| 2015-07-09T13:56:45.649-0400 I COMMAND [conn38] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.652-0400 m31200| 2015-07-09T13:56:45.652-0400 I COMMAND [conn63] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.653-0400 m31102| 2015-07-09T13:56:45.653-0400 I COMMAND [repl writer worker 8] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.653-0400 m31101| 2015-07-09T13:56:45.653-0400 I COMMAND [repl writer worker 13] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.655-0400 m31202| 2015-07-09T13:56:45.655-0400 I COMMAND [repl writer worker 1] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.656-0400 m31201| 2015-07-09T13:56:45.655-0400 I COMMAND [repl writer worker 4] CMD: drop db17.coll17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.708-0400 m31100| 2015-07-09T13:56:45.708-0400 I SHARDING [conn38] remotely refreshing metadata for db17.coll17 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5d2ca4787b9985d1c39, current metadata version is 2|3||559eb5d2ca4787b9985d1c39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.709-0400 m31100| 2015-07-09T13:56:45.709-0400 W SHARDING [conn38] no chunks found when reloading db17.coll17, previous version was 0|0||559eb5d2ca4787b9985d1c39, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.710-0400 m31100| 2015-07-09T13:56:45.709-0400 I SHARDING [conn38] dropping metadata for db17.coll17 at shard version 2|3||559eb5d2ca4787b9985d1c39, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.711-0400 m31200| 2015-07-09T13:56:45.710-0400 I SHARDING [conn63] remotely refreshing metadata for db17.coll17 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5d2ca4787b9985d1c39, current metadata version is 2|5||559eb5d2ca4787b9985d1c39
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.712-0400 m31200| 2015-07-09T13:56:45.712-0400 W SHARDING [conn63] no chunks found when reloading db17.coll17, previous version was 0|0||559eb5d2ca4787b9985d1c39, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.712-0400 m31200| 2015-07-09T13:56:45.712-0400 I SHARDING [conn63] dropping metadata for db17.coll17 at shard version 2|5||559eb5d2ca4787b9985d1c39, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.713-0400 m30999| 2015-07-09T13:56:45.713-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:45.713-0400-559eb5ddca4787b9985d1c3d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464605713), what: "dropCollection", ns: "db17.coll17", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.768-0400 m30999| 2015-07-09T13:56:45.767-0400 I SHARDING [conn1] distributed lock 'db17.coll17/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.823-0400 m30999| 2015-07-09T13:56:45.822-0400 I COMMAND [conn1] DROP DATABASE: db17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.823-0400 m30999| 2015-07-09T13:56:45.822-0400 I SHARDING [conn1] DBConfig::dropDatabase: db17
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.823-0400 m30999| 2015-07-09T13:56:45.822-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:45.822-0400-559eb5ddca4787b9985d1c3e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464605822), what: "dropDatabase.start", ns: "db17", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.929-0400 m30999| 2015-07-09T13:56:45.928-0400 I SHARDING [conn1] DBConfig::dropDatabase: db17 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.929-0400 m31200| 2015-07-09T13:56:45.929-0400 I COMMAND [conn66] dropDatabase db17 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.929-0400 m31200| 2015-07-09T13:56:45.929-0400 I COMMAND [conn66] dropDatabase db17 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.930-0400 m30999| 2015-07-09T13:56:45.929-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:45.929-0400-559eb5ddca4787b9985d1c3f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464605929), what: "dropDatabase", ns: "db17", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.930-0400 m31202| 2015-07-09T13:56:45.930-0400 I COMMAND [repl writer worker 6] dropDatabase db17 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.930-0400 m31202| 2015-07-09T13:56:45.930-0400 I COMMAND [repl writer worker 6] dropDatabase db17 finished
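(Editor's note: the drop sequence above is the expansion of an ordinary drop issued through mongos: conn1 acquires the distributed lock for db17.coll17, logs the dropCollection.start/dropCollection changelog events, each shard drops its data and reloads its now-empty chunk metadata (the "no chunks found ... this is a drop" warnings), and DBConfig::dropDatabase then removes db17 on its primary shard, test-rs1. The client side is just two calls; the host and port below are read off the m30999 log prefix and are otherwise an assumption:)

// Sketch of the client-side teardown that triggers the entries above.
var mongos = new Mongo("bs-osx108-8:30999"); // assumed address of the m30999 mongos
var db17 = mongos.getDB("db17");
db17.coll17.drop();   // expands into the distributed-lock + per-shard drops
db17.dropDatabase();  // expands into the DBConfig::dropDatabase sequence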
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.930-0400 m31201| 2015-07-09T13:56:45.930-0400 I COMMAND [repl writer worker 5] dropDatabase db17 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:45.930-0400 m31201| 2015-07-09T13:56:45.930-0400 I COMMAND [repl writer worker 5] dropDatabase db17 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.018-0400 m31100| 2015-07-09T13:56:46.017-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.021-0400 m31101| 2015-07-09T13:56:46.021-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.021-0400 m31102| 2015-07-09T13:56:46.021-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.057-0400 m31200| 2015-07-09T13:56:46.056-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.059-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.059-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.059-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.060-0400 jstests/concurrency/fsm_workloads/touch_base.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.060-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.060-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.060-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.060-0400 m31201| 2015-07-09T13:56:46.060-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.060-0400 m31202| 2015-07-09T13:56:46.060-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.066-0400 m30999| 2015-07-09T13:56:46.066-0400 I SHARDING [conn1] distributed lock 'db18/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5deca4787b9985d1c40
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.070-0400 m30999| 2015-07-09T13:56:46.070-0400 I SHARDING [conn1] Placing [db18] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.070-0400 m30999| 2015-07-09T13:56:46.070-0400 I SHARDING [conn1] Enabling sharding for database [db18] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.127-0400 m30999| 2015-07-09T13:56:46.126-0400 I SHARDING [conn1] distributed lock 'db18/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.148-0400 m31200| 2015-07-09T13:56:46.147-0400 I INDEX [conn51] build index on: db18.coll18 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.148-0400 m31200| 2015-07-09T13:56:46.147-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.154-0400 m31200| 2015-07-09T13:56:46.154-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.155-0400 m30999| 2015-07-09T13:56:46.154-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db18.coll18", key: { tid: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.164-0400 m30999| 2015-07-09T13:56:46.164-0400 I SHARDING [conn1] distributed lock 'db18.coll18/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5deca4787b9985d1c41
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.165-0400 m30999| 2015-07-09T13:56:46.164-0400 I SHARDING [conn1] enable sharding on: db18.coll18 with shard key: { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.165-0400 m30999| 2015-07-09T13:56:46.165-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.164-0400-559eb5deca4787b9985d1c42", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464606165), what: "shardCollection.start", ns: "db18.coll18", details: { shardKey: { tid: 1.0 }, collection: "db18.coll18", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.168-0400 m31202| 2015-07-09T13:56:46.167-0400 I INDEX [repl writer worker 11] build index on: db18.coll18 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.168-0400 m31202| 2015-07-09T13:56:46.167-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.169-0400 m31201| 2015-07-09T13:56:46.168-0400 I INDEX [repl writer worker 14] build index on: db18.coll18 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.169-0400 m31201| 2015-07-09T13:56:46.168-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.173-0400 m31201| 2015-07-09T13:56:46.173-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.174-0400 m31202| 2015-07-09T13:56:46.173-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.219-0400 m30999| 2015-07-09T13:56:46.219-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db18.coll18 using new epoch 559eb5deca4787b9985d1c43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.272-0400 m30999| 2015-07-09T13:56:46.272-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db18.coll18: 0ms sequenceNumber: 88 version: 1|0||559eb5deca4787b9985d1c43 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.328-0400 m30999| 2015-07-09T13:56:46.327-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db18.coll18: 0ms sequenceNumber: 89 version: 1|0||559eb5deca4787b9985d1c43 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.329-0400 m31200| 2015-07-09T13:56:46.329-0400 I SHARDING [conn41] remotely refreshing metadata for db18.coll18 with requested shard version 1|0||559eb5deca4787b9985d1c43, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.331-0400 m31200| 2015-07-09T13:56:46.330-0400 I SHARDING [conn41] collection db18.coll18 was previously unsharded, new metadata loaded with shard version 1|0||559eb5deca4787b9985d1c43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.331-0400 m31200| 2015-07-09T13:56:46.330-0400 I SHARDING [conn41] collection version was loaded at version 1|0||559eb5deca4787b9985d1c43, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.331-0400 m30999| 2015-07-09T13:56:46.331-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.331-0400-559eb5deca4787b9985d1c44", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464606331), what: "shardCollection", ns: "db18.coll18", details: { version: "1|0||559eb5deca4787b9985d1c43" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.385-0400 m30999| 2015-07-09T13:56:46.385-0400 I SHARDING [conn1] distributed lock 'db18.coll18/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.391-0400 m31200| 2015-07-09T13:56:46.391-0400 I INDEX [conn41] build index on: db18.coll18 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.392-0400 m31200| 2015-07-09T13:56:46.391-0400 I INDEX [conn41] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.400-0400 m31200| 2015-07-09T13:56:46.399-0400 I INDEX [conn41] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.408-0400 m31100| 2015-07-09T13:56:46.408-0400 I INDEX [conn56] build index on: db18.coll18 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.409-0400 m31100| 2015-07-09T13:56:46.408-0400 I INDEX [conn56] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.410-0400 m31201| 2015-07-09T13:56:46.410-0400 I INDEX [repl writer worker 10] build index on: db18.coll18 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.410-0400 m31201| 2015-07-09T13:56:46.410-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.414-0400 m31202| 2015-07-09T13:56:46.413-0400 I INDEX [repl writer worker 2] build index on: db18.coll18 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.414-0400 m31202| 2015-07-09T13:56:46.413-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.418-0400 m31100| 2015-07-09T13:56:46.418-0400 I INDEX [conn56] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.419-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.501-0400 m30999| 2015-07-09T13:56:46.500-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62968 #112 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.506-0400 m30998| 2015-07-09T13:56:46.504-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62969 #112 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.514-0400 m31201| 2015-07-09T13:56:46.513-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.514-0400 m30998| 2015-07-09T13:56:46.514-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62970 #113 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.524-0400 m30999| 2015-07-09T13:56:46.523-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62971 #113 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.531-0400 m30998| 2015-07-09T13:56:46.529-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62972 #114 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.538-0400 m31102| 2015-07-09T13:56:46.535-0400 I INDEX [repl writer worker 14] build index on: db18.coll18 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.538-0400 m31102| 2015-07-09T13:56:46.535-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.540-0400 m30998| 2015-07-09T13:56:46.540-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62973 #115 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.547-0400 m31202| 2015-07-09T13:56:46.546-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.547-0400 m30999| 2015-07-09T13:56:46.547-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62975 #114 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.548-0400 m30998| 2015-07-09T13:56:46.548-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62974 #116 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.549-0400 m30999| 2015-07-09T13:56:46.548-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62976 #115 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.552-0400 m30999| 2015-07-09T13:56:46.552-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62977 #116 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.553-0400 m31102| 2015-07-09T13:56:46.553-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.555-0400 m31101| 2015-07-09T13:56:46.555-0400 I INDEX [repl writer worker 1] build index on: db18.coll18 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db18.coll18" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.555-0400 m31101| 2015-07-09T13:56:46.555-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.562-0400 setting random seed: 3706699986942
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.563-0400 setting random seed: 8542270981706
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.563-0400 m31101| 2015-07-09T13:56:46.563-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.563-0400 setting random seed: 5663373316638
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.567-0400 setting random seed: 5947394901886
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.568-0400 setting random seed: 2267341078259
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.569-0400 setting random seed: 4315377059392
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.569-0400 setting random seed: 8506709542125
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.569-0400 setting random seed: 7201520041562
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.570-0400 setting random seed: 9948456394486
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.575-0400 m30998| 2015-07-09T13:56:46.571-0400 I SHARDING [conn113] ChunkManager: time to load chunks for db18.coll18: 0ms sequenceNumber: 20 version: 1|0||559eb5deca4787b9985d1c43 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.575-0400 setting random seed: 3957616668194
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.663-0400 m31200| 2015-07-09T13:56:46.662-0400 I SHARDING [conn62] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.666-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.666-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.667-0400 m31200| 2015-07-09T13:56:46.664-0400
W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.667-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.667-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.667-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.667-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.668-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.669-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.670-0400 m31200| 2015-07-09T13:56:46.664-0400 W SHARDING [conn62] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.670-0400 m31200| 2015-07-09T13:56:46.664-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.671-0400 m31200| 2015-07-09T13:56:46.665-0400 I SHARDING [conn34] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.671-0400 m31200| 2015-07-09T13:56:46.668-0400 I SHARDING [conn65] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.671-0400 m31200| 2015-07-09T13:56:46.668-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.671-0400 m31200| 2015-07-09T13:56:46.669-0400 I SHARDING [conn62] distributed lock 'db18.coll18/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5ded5a107a5b9c0dad8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.672-0400 m31200| 2015-07-09T13:56:46.669-0400 I SHARDING [conn62] remotely refreshing metadata for db18.coll18 based on current shard version 1|0||559eb5deca4787b9985d1c43, current metadata version is 1|0||559eb5deca4787b9985d1c43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.672-0400 m31200| 2015-07-09T13:56:46.670-0400 W SHARDING [conn34] could 
not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.673-0400 m30998| 2015-07-09T13:56:46.670-0400 W SHARDING [conn116] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.677-0400 m31200| 2015-07-09T13:56:46.677-0400 I SHARDING [conn62] metadata of collection db18.coll18 already up to date (shard version : 1|0||559eb5deca4787b9985d1c43, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.677-0400 m31200| 2015-07-09T13:56:46.677-0400 I SHARDING [conn62] splitChunk accepted at version 1|0||559eb5deca4787b9985d1c43 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.680-0400 m31200| 2015-07-09T13:56:46.678-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.680-0400 m31200| 2015-07-09T13:56:46.680-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.682-0400 m31200| 2015-07-09T13:56:46.681-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.682-0400 m31200| 2015-07-09T13:56:46.681-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.683-0400 m31200| 2015-07-09T13:56:46.681-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.683-0400 m31200| 2015-07-09T13:56:46.681-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.683-0400 m31200| 2015-07-09T13:56:46.681-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.684-0400 m31200| 2015-07-09T13:56:46.682-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.685-0400 m31200| 2015-07-09T13:56:46.682-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.687-0400 m31200| 2015-07-09T13:56:46.682-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 } 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.688-0400 m31200| 2015-07-09T13:56:46.682-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.689-0400 m31200| 2015-07-09T13:56:46.682-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.690-0400 m31200| 2015-07-09T13:56:46.683-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.690-0400 m31200| 2015-07-09T13:56:46.684-0400 I SHARDING [conn63] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.691-0400 m31200| 2015-07-09T13:56:46.685-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.691-0400 m29000| 2015-07-09T13:56:46.688-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62978 #44 (44 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.696-0400 m31200| 2015-07-09T13:56:46.696-0400 I COMMAND [conn68] command db18.$cmd command: insert { insert: "coll18", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('559eb5deca4787b9985d1c43') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 6413 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.697-0400 m31200| 2015-07-09T13:56:46.696-0400 I SHARDING [conn34] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.698-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.699-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.699-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.699-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:46.700-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.700-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.700-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.701-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.701-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.701-0400 m31200| 2015-07-09T13:56:46.698-0400 W SHARDING [conn34] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.702-0400 m31200| 2015-07-09T13:56:46.701-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.704-0400 m29000| 2015-07-09T13:56:46.703-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62979 #45 (45 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.711-0400 m31200| 2015-07-09T13:56:46.709-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.711-0400 m29000| 2015-07-09T13:56:46.709-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62980 #46 (46 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.712-0400 m30999| 2015-07-09T13:56:46.710-0400 W SHARDING [conn112] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.712-0400 m31200| 2015-07-09T13:56:46.710-0400 W SHARDING [conn34] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken. 
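The events so far record mongos (m30999) sharding db18.coll18 into one initial chunk under epoch 559eb5deca4787b9985d1c43, the { x: 1.0 } "x_1" index build fanning out from each primary to its secondaries, and the ten FSM worker threads seeding their PRNGs before writing. A minimal mongo shell sketch of an equivalent setup, assuming a shard key of { tid: 1 } as the splitChunk keyPattern above implies (the test's real setup lives in jstests/concurrency and may differ):

    // Sketch only: reconstructs the setup implied by the log, not the test's code.
    sh.enableSharding("db18");                             // allow db18 to hold sharded collections
    sh.shardCollection("db18.coll18", { tid: 1 });         // one initial chunk, new epoch assigned
    db.getSiblingDB("db18").coll18.createIndex({ x: 1 });  // the "x_1" builds seen above
    Random.setRandomSeed();                                // prints "setting random seed: <n>"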
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.712-0400 m30998| 2015-07-09T13:56:46.710-0400 W SHARDING [conn112] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.716-0400 m31200| 2015-07-09T13:56:46.715-0400 W SHARDING [conn63] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.716-0400 m30999| 2015-07-09T13:56:46.715-0400 W SHARDING [conn113] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.718-0400 m29000| 2015-07-09T13:56:46.717-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:62981 #47 (47 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.719-0400 m31200| 2015-07-09T13:56:46.719-0400 W SHARDING [conn65] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
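Every split attempt races for the same distributed collection lock; the request on conn62 won it above ("distributed lock ... acquired"), so the parallel attempts on conn34, conn48, conn63 and conn65 fail with code 125 and the "could not acquire collection lock" errmsg relayed back through both routers (m30999/m30998). These failures are benign here: mongos retries the auto-split on a later write. A manual split issued through mongos takes the same path and can lose the same race (illustrative sketch; the split key value is arbitrary):

    // Ask mongos to split db18.coll18 at tid 5. While an auto-split holds the
    // collection's distributed lock this may return ok: 0 with code 125,
    // exactly like the splitChunk failures in the log.
    var res = db.adminCommand({ split: "db18.coll18", middle: { tid: 5 } });
    printjson(res);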
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.719-0400 m30998| 2015-07-09T13:56:46.719-0400 W SHARDING [conn114] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.724-0400 m30998| 2015-07-09T13:56:46.723-0400 I SHARDING [conn114] ChunkManager: time to load chunks for db18.coll18: 0ms sequenceNumber: 21 version: 1|10||559eb5deca4787b9985d1c43 based on: 1|0||559eb5deca4787b9985d1c43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.726-0400 m31200| 2015-07-09T13:56:46.725-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.725-0400-559eb5ded5a107a5b9c0dad9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464606725), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 10, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.744-0400 m30998| 2015-07-09T13:56:46.744-0400 I NETWORK [conn112] end connection 127.0.0.1:62969 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.749-0400 m31200| 2015-07-09T13:56:46.745-0400 I COMMAND [conn59] command db18.$cmd command: insert { insert: "coll18", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('559eb5deca4787b9985d1c43') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 13428 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.749-0400 m31200| 2015-07-09T13:56:46.746-0400 I SHARDING [conn63] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.749-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.750-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.750-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.750-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.750-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.750-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.750-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.751-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.751-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.751-0400 m31200| 2015-07-09T13:56:46.748-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.751-0400 m31200| 2015-07-09T13:56:46.748-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.751-0400 m31200| 2015-07-09T13:56:46.750-0400 W SHARDING [conn63] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
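The recurring "possible low cardinality key detected" warnings are the split-point scan noticing that every document carries one of only ten tid values (one per worker thread), so a chunk can never be split inside a single tid. One way to confirm the cardinality from the shell (illustrative check, not part of the test):

    // Ten FSM threads write tid 0..9, so the shard key takes at most ten values.
    var tids = db.getSiblingDB("db18").coll18.distinct("tid");
    print("distinct tid values: " + tids.length);  // expected: 10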
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.752-0400 m30999| 2015-07-09T13:56:46.750-0400 W SHARDING [conn115] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.778-0400 m31200| 2015-07-09T13:56:46.777-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.777-0400-559eb5ded5a107a5b9c0dada", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464606777), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 10, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.807-0400 m31200| 2015-07-09T13:56:46.807-0400 I COMMAND [conn68] command db18.$cmd command: insert { insert: "coll18", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('559eb5deca4787b9985d1c43') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 513 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 102ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.813-0400 m31200| 2015-07-09T13:56:46.813-0400 I COMMAND [conn22] command db18.$cmd command: insert { insert: "coll18", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('559eb5deca4787b9985d1c43') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 11507 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.814-0400 m31200| 2015-07-09T13:56:46.813-0400 I SHARDING [conn63] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.816-0400 m31200| 2015-07-09T13:56:46.815-0400 I COMMAND [conn25] command db18.$cmd command: insert { insert: "coll18", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('559eb5deca4787b9985d1c43') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 15658 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 142ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.816-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.818-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.819-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.819-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.819-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.819-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.819-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.819-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.819-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.820-0400 m31200| 2015-07-09T13:56:46.816-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.820-0400 m30998| 2015-07-09T13:56:46.816-0400 I NETWORK [conn116] end connection 127.0.0.1:62974 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.820-0400 m31200| 2015-07-09T13:56:46.817-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.820-0400 m31200| 2015-07-09T13:56:46.818-0400 W SHARDING [conn63] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
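The I COMMAND lines are 100-document inserts that crossed mongod's default 100 ms slow-operation threshold; their lock stats (acquireWaitCount, timeAcquiringMicros) show the writers briefly queueing behind the split's metadata work rather than any storage stall. A rough sketch of the kind of batch that produces such an entry, with hypothetical field values (the FSM workload's documents differ):

    // Each logged insert { insert: "coll18", documents: 100, ... } corresponds
    // to a 100-document batch like this one.
    Random.setRandomSeed();  // same helper that printed "setting random seed" earlier
    var coll = db.getSiblingDB("db18").coll18;
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        bulk.insert({ tid: 7, x: Random.rand() });  // tid/x mirror the shard key and x_1 index
    }
    bulk.execute();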
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.821-0400 m30999| 2015-07-09T13:56:46.818-0400 W SHARDING [conn116] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.825-0400 m31200| 2015-07-09T13:56:46.823-0400 I SHARDING [conn63] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.828-0400 m31200| 2015-07-09T13:56:46.826-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.828-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.828-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.829-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.829-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.829-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.829-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.829-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.829-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.829-0400 m31200| 2015-07-09T13:56:46.827-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.830-0400 m31200| 2015-07-09T13:56:46.827-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.830-0400 m31200| 2015-07-09T13:56:46.828-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.828-0400-559eb5ded5a107a5b9c0dadb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464606828), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 10, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.830-0400 m31200| 2015-07-09T13:56:46.828-0400 W SHARDING [conn63] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.831-0400 m30999| 2015-07-09T13:56:46.829-0400 W SHARDING [conn113] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.840-0400 m31200| 2015-07-09T13:56:46.838-0400 I COMMAND [conn51] command db18.$cmd command: insert { insert: "coll18", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('559eb5deca4787b9985d1c43') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 7078 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 120ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.840-0400 m31200| 2015-07-09T13:56:46.839-0400 I SHARDING [conn63] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.841-0400 m31200| 2015-07-09T13:56:46.840-0400 I COMMAND [conn72] command db18.$cmd command: insert { insert: "coll18", documents: 100, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 1000|0, ObjectId('559eb5deca4787b9985d1c43') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.841-0400 m31200| 2015-07-09T13:56:46.840-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.843-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.844-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.844-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.844-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.844-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.844-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.844-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.845-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.845-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.845-0400 m31200| 2015-07-09T13:56:46.842-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.845-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.845-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.845-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.845-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.846-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.846-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.846-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.847-0400 m31200| 2015-07-09T13:56:46.843-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.848-0400 m31200| 2015-07-09T13:56:46.843-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.848-0400 m31200| 2015-07-09T13:56:46.844-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.849-0400 m30999| 2015-07-09T13:56:46.845-0400 W SHARDING [conn114] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.849-0400 m31200| 2015-07-09T13:56:46.844-0400 W SHARDING [conn63] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.849-0400 m31200| 2015-07-09T13:56:46.846-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
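While the losers keep retrying, the winning splitChunk on conn62 is carving the single MinKey-to-MaxKey chunk into ten, logging one "multi-split" changelog document per boundary ("number: 1, of: 10" through "number: 3, of: 10" so far); m30998 has already reloaded the routing table at version 1|10. Those entries persist on the config servers and can be pulled out afterwards (illustrative query):

    // Run through mongos; config.changelog records split and migration history.
    db.getSiblingDB("config").changelog
        .find({ what: "multi-split", ns: "db18.coll18" })
        .sort({ time: 1 })
        .forEach(printjson);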
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.850-0400 m30999| 2015-07-09T13:56:46.846-0400 W SHARDING [conn112] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.858-0400 m31200| 2015-07-09T13:56:46.858-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.862-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.862-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.862-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.862-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.862-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.863-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.863-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.863-0400 m31200| 2015-07-09T13:56:46.861-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.863-0400 m31200| 2015-07-09T13:56:46.862-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.863-0400 m31200| 2015-07-09T13:56:46.862-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.863-0400 m31200| 2015-07-09T13:56:46.862-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.864-0400 m31200| 2015-07-09T13:56:46.864-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.864-0400 m30999| 2015-07-09T13:56:46.864-0400 W SHARDING [conn115] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.880-0400 m31200| 2015-07-09T13:56:46.880-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.880-0400-559eb5ded5a107a5b9c0dadc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464606880), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 10, chunk: { min: { tid: 3.0 }, max: { tid: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.904-0400 m31200| 2015-07-09T13:56:46.902-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.907-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.907-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.907-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.908-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.908-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.908-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.908-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.908-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.908-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.908-0400 m31200| 2015-07-09T13:56:46.907-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.909-0400 m31200| 2015-07-09T13:56:46.908-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.909-0400 m31200| 2015-07-09T13:56:46.909-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.910-0400 m30999| 2015-07-09T13:56:46.909-0400 W SHARDING [conn116] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.933-0400 m31200| 2015-07-09T13:56:46.932-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.932-0400-559eb5ded5a107a5b9c0dadd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464606932), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 10, chunk: { min: { tid: 4.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.934-0400 m30998| 2015-07-09T13:56:46.934-0400 I NETWORK [conn114] end connection 127.0.0.1:62972 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.936-0400 m31200| 2015-07-09T13:56:46.936-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.941-0400 m31200| 2015-07-09T13:56:46.936-0400 I SHARDING [conn63] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.941-0400 m31200| 2015-07-09T13:56:46.940-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.940-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.940-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.940-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
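Each losing attempt names the lock it lost: 'db18.coll18/bs-osx108-8:31200:1436464537:809424560', the collection lock held by the mongod on port 31200. In this release the distributed lock state lives on the config server and can be examined while the split storm runs (illustrative; the legacy lock document format is assumed, where state 2 generally means held):

    // The ts field should match the "acquired, ts : 559eb5de..." line earlier.
    db.getSiblingDB("config").locks.find({ _id: "db18.coll18" }).forEach(printjson);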
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.940-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.940-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.940-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.942-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.943-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.943-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.943-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.943-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.943-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.944-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.944-0400 m30999| 2015-07-09T13:56:46.943-0400 W SHARDING [conn112] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.945-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.945-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.946-0400 m31200| 2015-07-09T13:56:46.941-0400 W SHARDING [conn63] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.946-0400 m31200| 2015-07-09T13:56:46.941-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.963-0400 m31200| 2015-07-09T13:56:46.943-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.964-0400 m31200| 2015-07-09T13:56:46.943-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.965-0400 m31200| 2015-07-09T13:56:46.944-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.965-0400 m31200| 2015-07-09T13:56:46.945-0400 W SHARDING [conn63] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.966-0400 m30999| 2015-07-09T13:56:46.945-0400 W SHARDING [conn113] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.966-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.966-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.966-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.966-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.967-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.967-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.967-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.967-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.968-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.968-0400 m31200| 2015-07-09T13:56:46.948-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.968-0400 m30999| 2015-07-09T13:56:46.950-0400 I NETWORK [conn113] end connection 127.0.0.1:62971 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.969-0400 m31200| 2015-07-09T13:56:46.950-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.969-0400 m31200| 2015-07-09T13:56:46.952-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.970-0400 m30999| 2015-07-09T13:56:46.952-0400 W SHARDING [conn114] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.970-0400 m30999| 2015-07-09T13:56:46.956-0400 I NETWORK [conn114] end connection 127.0.0.1:62975 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.972-0400 m31200| 2015-07-09T13:56:46.970-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.979-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.979-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.979-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.979-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.979-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.979-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.980-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.980-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.980-0400 m31200| 2015-07-09T13:56:46.976-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.980-0400 m31200| 2015-07-09T13:56:46.977-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.981-0400 m31200| 2015-07-09T13:56:46.978-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.981-0400 m30999| 2015-07-09T13:56:46.978-0400 I NETWORK [conn112] end connection 127.0.0.1:62968 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.981-0400 m31200| 2015-07-09T13:56:46.979-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.981-0400 m30999| 2015-07-09T13:56:46.980-0400 W SHARDING [conn115] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:46.991-0400 m31200| 2015-07-09T13:56:46.984-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:46.984-0400-559eb5ded5a107a5b9c0dade", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464606984), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 10, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.019-0400 m31200| 2015-07-09T13:56:47.019-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.024-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.024-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.025-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.025-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.025-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.025-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.025-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.025-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.025-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.026-0400 m31200| 2015-07-09T13:56:47.024-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.026-0400 m31200| 2015-07-09T13:56:47.025-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.027-0400 m31200| 2015-07-09T13:56:47.027-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.028-0400 m30999| 2015-07-09T13:56:47.027-0400 W SHARDING [conn116] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.036-0400 m31200| 2015-07-09T13:56:47.035-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.035-0400-559eb5dfd5a107a5b9c0dadf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464607035), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 10, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.041-0400 m31200| 2015-07-09T13:56:47.040-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.047-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.047-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.047-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.047-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.048-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.048-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.048-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.048-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.048-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.048-0400 m31200| 2015-07-09T13:56:47.047-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.050-0400 m31200| 2015-07-09T13:56:47.049-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.051-0400 m31200| 2015-07-09T13:56:47.050-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.052-0400 m30999| 2015-07-09T13:56:47.051-0400 W SHARDING [conn115] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.062-0400 m30999| 2015-07-09T13:56:47.062-0400 I NETWORK [conn115] end connection 127.0.0.1:62976 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.080-0400 m30998| 2015-07-09T13:56:47.077-0400 I NETWORK [conn113] end connection 127.0.0.1:62970 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.080-0400 m31200| 2015-07-09T13:56:47.079-0400 I SHARDING [conn48] request split points lookup for chunk db18.coll18 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.085-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.086-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.086-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.086-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.087-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.087-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.087-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.087-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.088-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.088-0400 m31200| 2015-07-09T13:56:47.085-0400 W SHARDING [conn48] possible low cardinality key detected in db18.coll18 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.088-0400 m31200| 2015-07-09T13:56:47.087-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.087-0400-559eb5dfd5a107a5b9c0dae0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464607087), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 10, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.089-0400 m31200| 2015-07-09T13:56:47.087-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.089-0400 m31200| 2015-07-09T13:56:47.089-0400 W SHARDING [conn48] could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db18.coll18 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.089-0400 m30999| 2015-07-09T13:56:47.089-0400 W SHARDING [conn116] splitChunk failed - cmd: { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db18.coll18 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.094-0400 m30999| 2015-07-09T13:56:47.093-0400 I NETWORK [conn116] end connection 127.0.0.1:62977 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.139-0400 m31200| 2015-07-09T13:56:47.139-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.139-0400-559eb5dfd5a107a5b9c0dae1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464607139), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 10, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.193-0400 m31200| 2015-07-09T13:56:47.192-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.192-0400-559eb5dfd5a107a5b9c0dae2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464607192), what: "multi-split", ns: "db18.coll18", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 10, of: 10, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eb5deca4787b9985d1c43') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.248-0400 m31200| 2015-07-09T13:56:47.248-0400 I SHARDING [conn62] distributed lock 'db18.coll18/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.249-0400 m31200| 2015-07-09T13:56:47.248-0400 I COMMAND [conn62] command db18.coll18 command: splitChunk { splitChunk: "db18.coll18", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5deca4787b9985d1c43') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 23788 } } } protocol:op_command 583ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.250-0400 m30998| 2015-07-09T13:56:47.250-0400 I SHARDING [conn115] autosplitted db18.coll18 shard: ns: db18.coll18, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 10 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.343-0400 m30998| 2015-07-09T13:56:47.342-0400 I NETWORK [conn115] end connection 127.0.0.1:62973 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.361-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.361-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.361-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.361-0400 jstests/concurrency/fsm_workloads/touch_base.js: Workload completed in 942 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.362-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.362-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.362-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.362-0400 m30999| 2015-07-09T13:56:47.362-0400 I COMMAND [conn1] DROP: db18.coll18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.362-0400 m30999| 2015-07-09T13:56:47.362-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.362-0400-559eb5dfca4787b9985d1c45", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464607362), what: "dropCollection.start", ns: "db18.coll18", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.420-0400 m30999| 2015-07-09T13:56:47.420-0400 I SHARDING [conn1] distributed lock 'db18.coll18/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5dfca4787b9985d1c46
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.421-0400 m31100| 2015-07-09T13:56:47.421-0400 I COMMAND [conn38] CMD: drop db18.coll18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.423-0400 m31200| 2015-07-09T13:56:47.422-0400 I COMMAND [conn48] CMD: drop db18.coll18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.425-0400 m31102| 2015-07-09T13:56:47.424-0400 I COMMAND [repl writer worker 4] CMD: drop db18.coll18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.425-0400 m31101| 2015-07-09T13:56:47.424-0400 I COMMAND [repl writer worker 7] CMD: drop db18.coll18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.426-0400 m31202| 2015-07-09T13:56:47.426-0400 I COMMAND [repl writer worker 14] CMD: drop db18.coll18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.427-0400 m31201| 2015-07-09T13:56:47.426-0400 I COMMAND [repl writer worker 1] CMD: drop db18.coll18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.479-0400 m31200| 2015-07-09T13:56:47.478-0400 I SHARDING [conn48] remotely refreshing metadata for db18.coll18 with requested shard version 0|0||000000000000000000000000, current shard version is 1|10||559eb5deca4787b9985d1c43, current metadata version is 1|10||559eb5deca4787b9985d1c43
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.480-0400 m31200| 2015-07-09T13:56:47.480-0400 W SHARDING [conn48] no chunks found when reloading db18.coll18, previous version was 0|0||559eb5deca4787b9985d1c43, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.480-0400 m31200| 2015-07-09T13:56:47.480-0400 I SHARDING [conn48] dropping metadata for db18.coll18 at shard version 1|10||559eb5deca4787b9985d1c43, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.481-0400 m30999| 2015-07-09T13:56:47.481-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.481-0400-559eb5dfca4787b9985d1c47", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464607481), what: "dropCollection", ns: "db18.coll18", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.535-0400 m30999| 2015-07-09T13:56:47.535-0400 I SHARDING [conn1] distributed lock 'db18.coll18/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.591-0400 m30999| 2015-07-09T13:56:47.591-0400 I COMMAND [conn1] DROP DATABASE: db18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.591-0400 m30999| 2015-07-09T13:56:47.591-0400 I SHARDING [conn1] DBConfig::dropDatabase: db18
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.591-0400 m30999| 2015-07-09T13:56:47.591-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.591-0400-559eb5dfca4787b9985d1c48", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464607591), what: "dropDatabase.start", ns: "db18", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.698-0400 m30999| 2015-07-09T13:56:47.697-0400 I SHARDING [conn1] DBConfig::dropDatabase: db18 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.698-0400 m31200| 2015-07-09T13:56:47.698-0400 I COMMAND [conn66] dropDatabase db18 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.698-0400 m31200| 2015-07-09T13:56:47.698-0400 I COMMAND [conn66] dropDatabase db18 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.699-0400 m30999| 2015-07-09T13:56:47.698-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.698-0400-559eb5dfca4787b9985d1c49", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464607698), what: "dropDatabase", ns: "db18", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.699-0400 m31201| 2015-07-09T13:56:47.699-0400 I COMMAND [repl writer worker 11] dropDatabase db18 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.699-0400 m31201| 2015-07-09T13:56:47.699-0400 I COMMAND [repl writer worker 11] dropDatabase db18 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.699-0400 m31202| 2015-07-09T13:56:47.699-0400 I COMMAND [repl writer worker 3] dropDatabase db18 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.700-0400 m31202| 2015-07-09T13:56:47.699-0400 I COMMAND [repl writer worker 3] dropDatabase db18 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.785-0400 m31100| 2015-07-09T13:56:47.784-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.786-0400 m31101| 2015-07-09T13:56:47.786-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.788-0400 m31102| 2015-07-09T13:56:47.788-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.817-0400 m31200| 2015-07-09T13:56:47.816-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.819-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.819-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.820-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.820-0400 jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.820-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.820-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.820-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.820-0400 m31202| 2015-07-09T13:56:47.820-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.821-0400 m31201| 2015-07-09T13:56:47.820-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.827-0400 m30999| 2015-07-09T13:56:47.826-0400 I SHARDING [conn1] distributed lock 'db19/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5dfca4787b9985d1c4a
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.831-0400 m30999| 2015-07-09T13:56:47.830-0400 I SHARDING [conn1] Placing [db19] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.831-0400 m30999| 2015-07-09T13:56:47.830-0400 I SHARDING [conn1] Enabling sharding for database [db19] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.885-0400 m30999| 2015-07-09T13:56:47.884-0400 I SHARDING [conn1] distributed lock 'db19/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.906-0400 m31200| 2015-07-09T13:56:47.905-0400 I INDEX [conn71] build index on: db19.coll19 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db19.coll19" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.906-0400 m31200| 2015-07-09T13:56:47.905-0400 I INDEX [conn71] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.914-0400 m31200| 2015-07-09T13:56:47.913-0400 I INDEX [conn71] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.915-0400 m30999| 2015-07-09T13:56:47.915-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db19.coll19", key: { tid: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.919-0400 m30999| 2015-07-09T13:56:47.919-0400 I SHARDING [conn1] distributed lock 'db19.coll19/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5dfca4787b9985d1c4b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.920-0400 m30999| 2015-07-09T13:56:47.920-0400 I SHARDING [conn1] enable sharding on: db19.coll19 with shard key: { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.920-0400 m30999| 2015-07-09T13:56:47.920-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:47.920-0400-559eb5dfca4787b9985d1c4c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464607920), what: "shardCollection.start", ns: "db19.coll19", details: { shardKey: { tid: 1.0 }, collection: "db19.coll19", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.930-0400 m31202| 2015-07-09T13:56:47.929-0400 I INDEX [repl writer worker 2] build index on: db19.coll19 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db19.coll19" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.930-0400 m31202| 2015-07-09T13:56:47.929-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.931-0400 m31201| 2015-07-09T13:56:47.931-0400 I INDEX [repl writer worker 8] build index on: db19.coll19 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db19.coll19" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.932-0400 m31201| 2015-07-09T13:56:47.931-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.937-0400 m31202| 2015-07-09T13:56:47.937-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.940-0400 m31201| 2015-07-09T13:56:47.939-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:47.973-0400 m30999| 2015-07-09T13:56:47.973-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db19.coll19 using new epoch 559eb5dfca4787b9985d1c4d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.028-0400 m30999| 2015-07-09T13:56:48.028-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db19.coll19: 0ms sequenceNumber: 90 version: 1|0||559eb5dfca4787b9985d1c4d based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.083-0400 m30999| 2015-07-09T13:56:48.083-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db19.coll19: 0ms sequenceNumber: 91 version: 1|0||559eb5dfca4787b9985d1c4d based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.085-0400 m31200| 2015-07-09T13:56:48.085-0400 I SHARDING [conn41] remotely refreshing metadata for db19.coll19 with requested shard version 1|0||559eb5dfca4787b9985d1c4d, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.087-0400 m31200| 2015-07-09T13:56:48.086-0400 I SHARDING [conn41] collection db19.coll19 was previously unsharded, new metadata loaded with shard version 1|0||559eb5dfca4787b9985d1c4d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.087-0400 m31200| 2015-07-09T13:56:48.086-0400 I SHARDING [conn41] collection version was loaded at version 1|0||559eb5dfca4787b9985d1c4d, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.087-0400 m30999| 2015-07-09T13:56:48.087-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:48.087-0400-559eb5e0ca4787b9985d1c4e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464608087), what: "shardCollection", ns: "db19.coll19", details: { version: "1|0||559eb5dfca4787b9985d1c4d" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.141-0400 m30999| 2015-07-09T13:56:48.141-0400 I SHARDING [conn1] distributed lock 'db19.coll19/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.142-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.298-0400 m30998| 2015-07-09T13:56:48.297-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62982 #117 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.361-0400 m30999| 2015-07-09T13:56:48.360-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62983 #117 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.362-0400 m30998| 2015-07-09T13:56:48.362-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62984 #118 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.364-0400 m30998| 2015-07-09T13:56:48.364-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62985 #119 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.366-0400 m30999| 2015-07-09T13:56:48.366-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62986 #118 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.367-0400 m30999| 2015-07-09T13:56:48.366-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62987 #119 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.368-0400 m30998| 2015-07-09T13:56:48.367-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62990 #120 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.368-0400 m30999| 2015-07-09T13:56:48.368-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62988 #120 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.369-0400 m30998| 2015-07-09T13:56:48.368-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62991 #121 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.373-0400 m30999| 2015-07-09T13:56:48.373-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62989 #121 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.374-0400 m30998| 2015-07-09T13:56:48.373-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62993 #122 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.377-0400 m30999| 2015-07-09T13:56:48.377-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62992 #122 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.380-0400 m30998| 2015-07-09T13:56:48.380-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62994 #123 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.383-0400 m30998| 2015-07-09T13:56:48.382-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62995 #124 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.383-0400 m30999| 2015-07-09T13:56:48.383-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62997 #123 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.385-0400 m30998| 2015-07-09T13:56:48.385-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62996 #125 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.386-0400 m30999| 2015-07-09T13:56:48.386-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62998 #124 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.387-0400 m30998| 2015-07-09T13:56:48.386-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63000 #126 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.389-0400 m30999| 2015-07-09T13:56:48.386-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:62999 #125 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.392-0400 m30999| 2015-07-09T13:56:48.392-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63001 #126 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.400-0400 setting random seed: 3204641845077
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.400-0400 setting random seed: 415515294298
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.400-0400 setting random seed: 653489893302
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.401-0400 setting random seed: 2962495000101
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.401-0400 setting random seed: 7866162550635
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.402-0400 m30998| 2015-07-09T13:56:48.402-0400 I SHARDING [conn117] ChunkManager: time to load chunks for db19.coll19: 0ms sequenceNumber: 22 version: 1|0||559eb5dfca4787b9985d1c4d based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.404-0400 setting random seed: 8273800830356
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.404-0400 setting random seed: 8979898761026
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.406-0400 setting random seed: 2940091146156
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.407-0400 setting random seed: 4111916455440
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.407-0400 setting random seed: 6594050093553
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.408-0400 setting random seed: 5158546851016
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.408-0400 setting random seed: 819754721596
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.408-0400 setting random seed: 9209820940159
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.408-0400 setting random seed: 3565554525703
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.409-0400 setting random seed: 6199973965995
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.411-0400 setting random seed: 9696965403854
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.413-0400 setting random seed: 6897618477232
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.415-0400 m31200| 2015-07-09T13:56:48.414-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63002 #79 (73 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.419-0400 setting random seed: 7617612290196
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.420-0400 setting random seed: 6274941395968
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.421-0400 setting random seed: 7770620556548
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.424-0400 m31200| 2015-07-09T13:56:48.423-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63003 #80 (74 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.424-0400 m31200| 2015-07-09T13:56:48.424-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.424-0400 m31200| 2015-07-09T13:56:48.424-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.425-0400 m31200| 2015-07-09T13:56:48.424-0400 I SHARDING [conn63] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.426-0400 m31200| 2015-07-09T13:56:48.425-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.426-0400 m31200| 2015-07-09T13:56:48.425-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63004 #81 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.427-0400 m31200| 2015-07-09T13:56:48.426-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.427-0400 m31200| 2015-07-09T13:56:48.427-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.428-0400 m31200| 2015-07-09T13:56:48.427-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63005 #82 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.428-0400 m31200| 2015-07-09T13:56:48.428-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.430-0400 m31200| 2015-07-09T13:56:48.429-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.430-0400 m31200| 2015-07-09T13:56:48.429-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.431-0400 m31200| 2015-07-09T13:56:48.430-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.431-0400 m31200| 2015-07-09T13:56:48.431-0400 I SHARDING [conn34] could not acquire lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.431-0400 m31200| 2015-07-09T13:56:48.431-0400 I SHARDING [conn34] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.432-0400 m31200| 2015-07-09T13:56:48.431-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.432-0400 m31200| 2015-07-09T13:56:48.431-0400 I SHARDING [conn63] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e0d5a107a5b9c0dae4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.432-0400 m31200| 2015-07-09T13:56:48.432-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63006 #83 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.433-0400 m30998| 2015-07-09T13:56:48.433-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.434-0400 m31200| 2015-07-09T13:56:48.433-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.434-0400 m31200| 2015-07-09T13:56:48.433-0400 I SHARDING [conn63] remotely refreshing metadata for db19.coll19 based on current shard version 1|0||559eb5dfca4787b9985d1c4d, current metadata version is 1|0||559eb5dfca4787b9985d1c4d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.434-0400 m31200| 2015-07-09T13:56:48.434-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.436-0400 m31200| 2015-07-09T13:56:48.436-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.436-0400 m31200| 2015-07-09T13:56:48.436-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.437-0400 m31200| 2015-07-09T13:56:48.436-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.438-0400 m30998| 2015-07-09T13:56:48.437-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.438-0400 m30999| 2015-07-09T13:56:48.437-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.438-0400 m31200| 2015-07-09T13:56:48.437-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63007 #84 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.438-0400 m31200| 2015-07-09T13:56:48.437-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.439-0400 m31200| 2015-07-09T13:56:48.438-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.439-0400 m30999| 2015-07-09T13:56:48.438-0400 W SHARDING [conn126] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.441-0400 m31200| 2015-07-09T13:56:48.441-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.442-0400 m30999| 2015-07-09T13:56:48.441-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.443-0400 m31200| 2015-07-09T13:56:48.442-0400 I SHARDING [conn63] metadata of collection db19.coll19 already up to date (shard version : 1|0||559eb5dfca4787b9985d1c4d, took 3ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.443-0400 m31200| 2015-07-09T13:56:48.443-0400 I SHARDING [conn63] splitChunk accepted at version 1|0||559eb5dfca4787b9985d1c4d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.443-0400 m31200| 2015-07-09T13:56:48.443-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.450-0400 m31200| 2015-07-09T13:56:48.449-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63008 #85 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.451-0400 m31200| 2015-07-09T13:56:48.449-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.451-0400 m31200| 2015-07-09T13:56:48.450-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.451-0400 m31200| 2015-07-09T13:56:48.450-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.451-0400 m31200| 2015-07-09T13:56:48.450-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.452-0400 m31200| 2015-07-09T13:56:48.450-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.452-0400 m31200| 2015-07-09T13:56:48.450-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:48.449-0400-559eb5e0d5a107a5b9c0dae6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464608450), what: "multi-split", ns: "db19.coll19", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 3, chunk: { min: { tid: MinKey }, max: { tid: 2.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb5dfca4787b9985d1c4d') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.452-0400 m31200| 2015-07-09T13:56:48.450-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.452-0400 m31200| 2015-07-09T13:56:48.451-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.453-0400 m31200| 2015-07-09T13:56:48.452-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.454-0400 m31200| 2015-07-09T13:56:48.453-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.454-0400 m31200| 2015-07-09T13:56:48.454-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.455-0400 m30999| 2015-07-09T13:56:48.455-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.458-0400 m31200| 2015-07-09T13:56:48.458-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.459-0400 m31200| 2015-07-09T13:56:48.458-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.459-0400 m31200| 2015-07-09T13:56:48.458-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.459-0400 m31200| 2015-07-09T13:56:48.458-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.460-0400 m31200| 2015-07-09T13:56:48.458-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.460-0400 m31200| 2015-07-09T13:56:48.459-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.460-0400 m31200| 2015-07-09T13:56:48.459-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.461-0400 m29000| 2015-07-09T13:56:48.460-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63009 #48 (48 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.461-0400 m31200| 2015-07-09T13:56:48.460-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.461-0400 m31200| 2015-07-09T13:56:48.460-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.462-0400 m31200| 2015-07-09T13:56:48.460-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.462-0400 m31200| 2015-07-09T13:56:48.460-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.463-0400 m31200| 2015-07-09T13:56:48.461-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.463-0400 m31200| 2015-07-09T13:56:48.461-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.464-0400 m30999| 2015-07-09T13:56:48.461-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.464-0400 m30998| 2015-07-09T13:56:48.461-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.465-0400 m30998| 2015-07-09T13:56:48.462-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.465-0400 m31200| 2015-07-09T13:56:48.462-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.465-0400 m31200| 2015-07-09T13:56:48.462-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.465-0400 m30999| 2015-07-09T13:56:48.463-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.466-0400 m30999| 2015-07-09T13:56:48.463-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.466-0400 m30998| 2015-07-09T13:56:48.463-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.466-0400 m31200| 2015-07-09T13:56:48.463-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.467-0400 m30998| 2015-07-09T13:56:48.464-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.467-0400 m30998| 2015-07-09T13:56:48.464-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.469-0400 m31200| 2015-07-09T13:56:48.469-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.472-0400 m31200| 2015-07-09T13:56:48.472-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.473-0400 m31200| 2015-07-09T13:56:48.473-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.474-0400 m31200| 2015-07-09T13:56:48.474-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.474-0400 m31200| 2015-07-09T13:56:48.474-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.475-0400 m31200| 2015-07-09T13:56:48.474-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.475-0400 m31200| 2015-07-09T13:56:48.475-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.475-0400 m31200| 2015-07-09T13:56:48.475-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.476-0400 m31200| 2015-07-09T13:56:48.476-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.476-0400 m31200| 2015-07-09T13:56:48.476-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.477-0400 m31200| 2015-07-09T13:56:48.476-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.477-0400 m30999| 2015-07-09T13:56:48.477-0400 W SHARDING [conn126] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.479-0400 m31200| 2015-07-09T13:56:48.478-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.479-0400 m31200| 2015-07-09T13:56:48.479-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.480-0400 m31200| 2015-07-09T13:56:48.480-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.481-0400 m30999| 2015-07-09T13:56:48.481-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.482-0400 m31200| 2015-07-09T13:56:48.480-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.482-0400 m31200| 2015-07-09T13:56:48.480-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.483-0400 m31200| 2015-07-09T13:56:48.481-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.483-0400 m30999| 2015-07-09T13:56:48.482-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.484-0400 m31200| 2015-07-09T13:56:48.482-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.484-0400 m31200| 2015-07-09T13:56:48.482-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.484-0400 m31200| 2015-07-09T13:56:48.482-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.484-0400 m31200| 2015-07-09T13:56:48.483-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 14.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.485-0400 m31200| 2015-07-09T13:56:48.483-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 14.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.486-0400 m30998| 2015-07-09T13:56:48.483-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.486-0400 m31200| 2015-07-09T13:56:48.483-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.486-0400 m31200| 2015-07-09T13:56:48.484-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.487-0400 m30998| 2015-07-09T13:56:48.484-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.488-0400 m31200| 2015-07-09T13:56:48.485-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.488-0400 m30998| 2015-07-09T13:56:48.485-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.488-0400 m31200| 2015-07-09T13:56:48.486-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.490-0400 m30998| 2015-07-09T13:56:48.486-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 11.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.490-0400 m31200| 2015-07-09T13:56:48.486-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.491-0400 m30998| 2015-07-09T13:56:48.487-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 14.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.492-0400 m30999| 2015-07-09T13:56:48.487-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 14.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.493-0400 m31200| 2015-07-09T13:56:48.492-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.493-0400 m31200| 2015-07-09T13:56:48.493-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.495-0400 m31200| 2015-07-09T13:56:48.494-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.495-0400 m31200| 2015-07-09T13:56:48.495-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.496-0400 m31200| 2015-07-09T13:56:48.496-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.497-0400 m31200| 2015-07-09T13:56:48.496-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.497-0400 m31200| 2015-07-09T13:56:48.496-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.497-0400 m31200| 2015-07-09T13:56:48.497-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.499-0400 m31200| 2015-07-09T13:56:48.497-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.500-0400 m31200| 2015-07-09T13:56:48.497-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.500-0400 m31200| 2015-07-09T13:56:48.498-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.505-0400 m31200| 2015-07-09T13:56:48.499-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.505-0400 m31200| 2015-07-09T13:56:48.499-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.506-0400 m31200| 2015-07-09T13:56:48.500-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.507-0400 m30999| 2015-07-09T13:56:48.500-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.508-0400 m30999| 2015-07-09T13:56:48.500-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.508-0400 m31200| 2015-07-09T13:56:48.500-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.508-0400 m31200| 2015-07-09T13:56:48.501-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.509-0400 m31200| 2015-07-09T13:56:48.501-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.509-0400 m31200| 2015-07-09T13:56:48.501-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.509-0400 m31200| 2015-07-09T13:56:48.501-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.509-0400 m30998| 2015-07-09T13:56:48.502-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.510-0400 m30999| 2015-07-09T13:56:48.502-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.510-0400 m31200| 2015-07-09T13:56:48.502-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.510-0400 m31200| 2015-07-09T13:56:48.502-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.510-0400 m31200| 2015-07-09T13:56:48.503-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.511-0400 m31200| 2015-07-09T13:56:48.503-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.511-0400 m31200| 2015-07-09T13:56:48.503-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.511-0400 m30998| 2015-07-09T13:56:48.503-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.512-0400 m31200| 2015-07-09T13:56:48.504-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:48.504-0400-559eb5e0d5a107a5b9c0dae7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464608504), what: "multi-split", ns: "db19.coll19", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 3, chunk: { min: { tid: 2.0 }, max: { tid: 17.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb5dfca4787b9985d1c4d') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.512-0400 m30998| 2015-07-09T13:56:48.504-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.512-0400 m30998| 2015-07-09T13:56:48.504-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.513-0400 m31200| 2015-07-09T13:56:48.505-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.515-0400 m30999| 2015-07-09T13:56:48.506-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.515-0400 m31200| 2015-07-09T13:56:48.507-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.515-0400 m31200| 2015-07-09T13:56:48.512-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.515-0400 m30998| 2015-07-09T13:56:48.513-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 8.0 }, { tid: 13.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.518-0400 m31200| 2015-07-09T13:56:48.517-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.518-0400 m31200| 2015-07-09T13:56:48.517-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.518-0400 m31200| 2015-07-09T13:56:48.517-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.520-0400 m31200| 2015-07-09T13:56:48.519-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.520-0400 m31200| 2015-07-09T13:56:48.520-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.521-0400 m31200| 2015-07-09T13:56:48.520-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.524-0400 m31200| 2015-07-09T13:56:48.522-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.525-0400 m31200| 2015-07-09T13:56:48.522-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.526-0400 m31200| 2015-07-09T13:56:48.523-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.527-0400 m31200| 2015-07-09T13:56:48.524-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.527-0400 m31200| 2015-07-09T13:56:48.524-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.528-0400 m31200| 2015-07-09T13:56:48.524-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.528-0400 m31200| 2015-07-09T13:56:48.524-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.528-0400 m30999| 2015-07-09T13:56:48.524-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.529-0400 m30999| 2015-07-09T13:56:48.525-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.529-0400 m31200| 2015-07-09T13:56:48.525-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.530-0400 m31200| 2015-07-09T13:56:48.525-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.530-0400 m31200| 2015-07-09T13:56:48.526-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.531-0400 m31200| 2015-07-09T13:56:48.526-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.532-0400 m31200| 2015-07-09T13:56:48.526-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.533-0400 m31200| 2015-07-09T13:56:48.527-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.533-0400 m31200| 2015-07-09T13:56:48.527-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.537-0400 m30999| 2015-07-09T13:56:48.527-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.537-0400 m31200| 2015-07-09T13:56:48.527-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.537-0400 m30999| 2015-07-09T13:56:48.528-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.538-0400 m30998| 2015-07-09T13:56:48.528-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.539-0400 m31200| 2015-07-09T13:56:48.529-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.539-0400 m30998| 2015-07-09T13:56:48.529-0400 W SHARDING [conn126] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.540-0400 m31200| 2015-07-09T13:56:48.530-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.541-0400 m31200| 2015-07-09T13:56:48.530-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.542-0400 m30998| 2015-07-09T13:56:48.531-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.542-0400 m31200| 2015-07-09T13:56:48.531-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.542-0400 m31200| 2015-07-09T13:56:48.532-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.543-0400 m30998| 2015-07-09T13:56:48.532-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.543-0400 m31200| 2015-07-09T13:56:48.533-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.543-0400 m30998| 2015-07-09T13:56:48.533-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.544-0400 m31200| 2015-07-09T13:56:48.536-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.545-0400 m31200| 2015-07-09T13:56:48.536-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.545-0400 m31200| 2015-07-09T13:56:48.539-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.545-0400 m31200| 2015-07-09T13:56:48.540-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.545-0400 m31200| 2015-07-09T13:56:48.541-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.545-0400 m31200| 2015-07-09T13:56:48.541-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.546-0400 m31200| 2015-07-09T13:56:48.541-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.546-0400 m31200| 2015-07-09T13:56:48.543-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.546-0400 m31200| 2015-07-09T13:56:48.543-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.546-0400 m31200| 2015-07-09T13:56:48.544-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.547-0400 m31200| 2015-07-09T13:56:48.544-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.547-0400 m31200| 2015-07-09T13:56:48.545-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.548-0400 m30999| 2015-07-09T13:56:48.545-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.550-0400 m31200| 2015-07-09T13:56:48.545-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.552-0400 m30999| 2015-07-09T13:56:48.545-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.553-0400 m31200| 2015-07-09T13:56:48.546-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.553-0400 m31200| 2015-07-09T13:56:48.546-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.553-0400 m31200| 2015-07-09T13:56:48.547-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.554-0400 m30999| 2015-07-09T13:56:48.547-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.554-0400 m31200| 2015-07-09T13:56:48.548-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.554-0400 m31200| 2015-07-09T13:56:48.548-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.555-0400 m31200| 2015-07-09T13:56:48.548-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.555-0400 m31200| 2015-07-09T13:56:48.548-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.556-0400 m30998| 2015-07-09T13:56:48.549-0400 W SHARDING [conn126] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.556-0400 m31200| 2015-07-09T13:56:48.549-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.556-0400 m31200| 2015-07-09T13:56:48.550-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.557-0400 m31200| 2015-07-09T13:56:48.550-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.557-0400 m30998| 2015-07-09T13:56:48.550-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.557-0400 m30999| 2015-07-09T13:56:48.550-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.558-0400 m30998| 2015-07-09T13:56:48.550-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.558-0400 m31200| 2015-07-09T13:56:48.551-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.558-0400 m31200| 2015-07-09T13:56:48.552-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.559-0400 m30998| 2015-07-09T13:56:48.553-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.559-0400 m31200| 2015-07-09T13:56:48.553-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.559-0400 m31200| 2015-07-09T13:56:48.555-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:48.555-0400-559eb5e0d5a107a5b9c0dae8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436464608555), what: "multi-split", ns: "db19.coll19", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 3, chunk: { min: { tid: 17.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb5dfca4787b9985d1c4d') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.560-0400 m31200| 2015-07-09T13:56:48.555-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.561-0400 m30998| 2015-07-09T13:56:48.556-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.561-0400 m31200| 2015-07-09T13:56:48.557-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.561-0400 m31200| 2015-07-09T13:56:48.557-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.561-0400 m31200| 2015-07-09T13:56:48.558-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.562-0400 m31200| 2015-07-09T13:56:48.562-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.563-0400 m31200| 2015-07-09T13:56:48.562-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.564-0400 m31200| 2015-07-09T13:56:48.563-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.565-0400 m31200| 2015-07-09T13:56:48.563-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.565-0400 m31200| 2015-07-09T13:56:48.563-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.565-0400 m31200| 2015-07-09T13:56:48.563-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.566-0400 m31200| 2015-07-09T13:56:48.563-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.566-0400 m31200| 2015-07-09T13:56:48.564-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.566-0400 m31200| 2015-07-09T13:56:48.564-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.567-0400 m30999| 2015-07-09T13:56:48.564-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.567-0400 m31200| 2015-07-09T13:56:48.564-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.568-0400 m30999| 2015-07-09T13:56:48.565-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.569-0400 m30999| 2015-07-09T13:56:48.565-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.569-0400 m31200| 2015-07-09T13:56:48.567-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.569-0400 m31200| 2015-07-09T13:56:48.567-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.569-0400 m31200| 2015-07-09T13:56:48.568-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.570-0400 m31200| 2015-07-09T13:56:48.568-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.570-0400 m31200| 2015-07-09T13:56:48.569-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.570-0400 m31200| 2015-07-09T13:56:48.569-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.572-0400 m31200| 2015-07-09T13:56:48.569-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.573-0400 m31200| 2015-07-09T13:56:48.570-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.573-0400 m31200| 2015-07-09T13:56:48.571-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.573-0400 m30999| 2015-07-09T13:56:48.571-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.574-0400 m30998| 2015-07-09T13:56:48.571-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.575-0400 m30998| 2015-07-09T13:56:48.571-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.575-0400 m31200| 2015-07-09T13:56:48.572-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.575-0400 m30998| 2015-07-09T13:56:48.572-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.576-0400 m31200| 2015-07-09T13:56:48.574-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.577-0400 m31200| 2015-07-09T13:56:48.574-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.577-0400 m31200| 2015-07-09T13:56:48.575-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.577-0400 m31200| 2015-07-09T13:56:48.575-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.578-0400 m30998| 2015-07-09T13:56:48.576-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.578-0400 m30998| 2015-07-09T13:56:48.576-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.578-0400 m31200| 2015-07-09T13:56:48.577-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.578-0400 m31200| 2015-07-09T13:56:48.577-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.579-0400 m31200| 2015-07-09T13:56:48.578-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.583-0400 m31200| 2015-07-09T13:56:48.581-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.583-0400 m31200| 2015-07-09T13:56:48.581-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.585-0400 m31200| 2015-07-09T13:56:48.581-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.586-0400 m31200| 2015-07-09T13:56:48.582-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.586-0400 m31200| 2015-07-09T13:56:48.582-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.586-0400 m31200| 2015-07-09T13:56:48.583-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.586-0400 m31200| 2015-07-09T13:56:48.583-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.587-0400 m31200| 2015-07-09T13:56:48.583-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.587-0400 m30999| 2015-07-09T13:56:48.583-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.588-0400 m30999| 2015-07-09T13:56:48.584-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.589-0400 m31200| 2015-07-09T13:56:48.584-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.590-0400 m30999| 2015-07-09T13:56:48.585-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.590-0400 m31200| 2015-07-09T13:56:48.586-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.590-0400 m31200| 2015-07-09T13:56:48.587-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.590-0400 m31200| 2015-07-09T13:56:48.587-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.590-0400 m31200| 2015-07-09T13:56:48.588-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.591-0400 m31200| 2015-07-09T13:56:48.588-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.591-0400 m31200| 2015-07-09T13:56:48.588-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.593-0400 m30999| 2015-07-09T13:56:48.589-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.593-0400 m31200| 2015-07-09T13:56:48.589-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.595-0400 m31200| 2015-07-09T13:56:48.589-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.596-0400 m31200| 2015-07-09T13:56:48.590-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.597-0400 m30998| 2015-07-09T13:56:48.591-0400 W SHARDING [conn117] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.598-0400 m30998| 2015-07-09T13:56:48.592-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.599-0400 m31200| 2015-07-09T13:56:48.593-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.600-0400 m31200| 2015-07-09T13:56:48.594-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.602-0400 m31200| 2015-07-09T13:56:48.594-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.603-0400 m31200| 2015-07-09T13:56:48.595-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.603-0400 m31200| 2015-07-09T13:56:48.595-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.604-0400 m31200| 2015-07-09T13:56:48.596-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.604-0400 m30998| 2015-07-09T13:56:48.596-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.604-0400 m31200| 2015-07-09T13:56:48.596-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.605-0400 m30998| 2015-07-09T13:56:48.596-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.605-0400 m30998| 2015-07-09T13:56:48.597-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.605-0400 m31200| 2015-07-09T13:56:48.597-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.606-0400 m31200| 2015-07-09T13:56:48.597-0400 I SHARDING [conn64] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.606-0400 m31200| 2015-07-09T13:56:48.599-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.606-0400 m31200| 2015-07-09T13:56:48.600-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.607-0400 m31200| 2015-07-09T13:56:48.600-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.607-0400 m31200| 2015-07-09T13:56:48.601-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.608-0400 m31200| 2015-07-09T13:56:48.602-0400 W SHARDING [conn64] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.609-0400 m31200| 2015-07-09T13:56:48.602-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.609-0400 m31200| 2015-07-09T13:56:48.602-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.609-0400 m30999| 2015-07-09T13:56:48.602-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.610-0400 m30999| 2015-07-09T13:56:48.602-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.610-0400 m30999| 2015-07-09T13:56:48.603-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.611-0400 m31200| 2015-07-09T13:56:48.604-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.611-0400 m31200| 2015-07-09T13:56:48.605-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.611-0400 m30999| 2015-07-09T13:56:48.606-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.611-0400 m31200| 2015-07-09T13:56:48.607-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.611-0400 m31200| 2015-07-09T13:56:48.607-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.612-0400 m31200| 2015-07-09T13:56:48.607-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.612-0400 m31200| 2015-07-09T13:56:48.608-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.612-0400 m31200| 2015-07-09T13:56:48.608-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.612-0400 m31200| 2015-07-09T13:56:48.608-0400 I SHARDING [conn63] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.613-0400 m31200| 2015-07-09T13:56:48.609-0400 I COMMAND [conn63] command db19.coll19 command: splitChunk { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 2.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 11273 } } } protocol:op_command 181ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.613-0400 m30999| 2015-07-09T13:56:48.612-0400 I SHARDING [conn121] ChunkManager: time to load chunks for db19.coll19: 0ms sequenceNumber: 92 version: 1|3||559eb5dfca4787b9985d1c4d based on: 1|0||559eb5dfca4787b9985d1c4d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.613-0400 m30999| 2015-07-09T13:56:48.613-0400 I SHARDING [conn121] autosplitted db19.coll19 shard: ns: db19.coll19, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 3 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.616-0400 m31200| 2015-07-09T13:56:48.612-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.616-0400 m31200| 2015-07-09T13:56:48.613-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.617-0400 m31200| 2015-07-09T13:56:48.613-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.617-0400 m31200| 2015-07-09T13:56:48.613-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.618-0400 m31200| 2015-07-09T13:56:48.614-0400 I SHARDING [conn62] could not acquire lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.618-0400 m31200| 2015-07-09T13:56:48.614-0400 I SHARDING [conn62] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.618-0400 m31200| 2015-07-09T13:56:48.614-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.618-0400 m30998| 2015-07-09T13:56:48.615-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.619-0400 m31200| 2015-07-09T13:56:48.614-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.619-0400 m31200| 2015-07-09T13:56:48.615-0400 W SHARDING [conn47] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.619-0400 m31200| 2015-07-09T13:56:48.615-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
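[annotation] One contender finally wins: conn63's splitChunk completes after 181ms (the command log above even shows the lock-wait in timeAcquiringMicros), the distributed lock is released, and mongos m30999 reloads the chunk map at version 1|3 and reports the chunk autosplit into 3 pieces at tid 2.0 and 17.0. A hedged mongo-shell sketch of how the resulting layout could be confirmed from the config metadata (namespace and field names come from the log; the expected output is inferred from the "multi-split" and "autosplitted ... into 3" entries, not reproduced from the test):

    // Read the chunk boundaries for db19.coll19 from the config database.
    var configDB = db.getSiblingDB("config");
    configDB.chunks.find({ ns: "db19.coll19" }, { _id: 0, min: 1, max: 1, shard: 1, lastmod: 1 })
            .sort({ min: 1 })
            .forEach(printjson);
    // Per the log, this should show three chunks on test-rs1:
    // [MinKey, 2.0), [2.0, 17.0), [17.0, MaxKey), all with epoch 559eb5dfca4787b9985d1c4d.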
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.620-0400 m31200| 2015-07-09T13:56:48.615-0400 I SHARDING [conn34] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e0d5a107a5b9c0dae9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.620-0400 m30998| 2015-07-09T13:56:48.615-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.621-0400 m30998| 2015-07-09T13:56:48.616-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.623-0400 m31200| 2015-07-09T13:56:48.616-0400 I SHARDING [conn63] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.623-0400 m31200| 2015-07-09T13:56:48.616-0400 I SHARDING [conn34] remotely refreshing metadata for db19.coll19 based on current shard version 1|3||559eb5dfca4787b9985d1c4d, current metadata version is 1|3||559eb5dfca4787b9985d1c4d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.623-0400 m31200| 2015-07-09T13:56:48.616-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.623-0400 m31200| 2015-07-09T13:56:48.616-0400 I SHARDING [conn84] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.624-0400 m30998| 2015-07-09T13:56:48.617-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.625-0400 m31200| 2015-07-09T13:56:48.619-0400 I SHARDING [conn18] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.625-0400 m31200| 2015-07-09T13:56:48.619-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.626-0400 m31200| 2015-07-09T13:56:48.620-0400 I SHARDING [conn48] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.626-0400 m31200| 2015-07-09T13:56:48.620-0400 W SHARDING [conn63] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.627-0400 m30999| 2015-07-09T13:56:48.621-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.627-0400 m31200| 2015-07-09T13:56:48.622-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.627-0400 m31200| 2015-07-09T13:56:48.623-0400 W SHARDING [conn84] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.628-0400 m30999| 2015-07-09T13:56:48.624-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.629-0400 m31200| 2015-07-09T13:56:48.625-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.630-0400 m31200| 2015-07-09T13:56:48.626-0400 I SHARDING [conn48] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.630-0400 m31200| 2015-07-09T13:56:48.626-0400 W SHARDING [conn18] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.630-0400 m31200| 2015-07-09T13:56:48.627-0400 W SHARDING [conn48] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.631-0400 m31200| 2015-07-09T13:56:48.628-0400 I SHARDING [conn34] metadata of collection db19.coll19 already up to date (shard version : 1|3||559eb5dfca4787b9985d1c4d, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.632-0400 m30999| 2015-07-09T13:56:48.628-0400 W SHARDING [conn124] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.632-0400 m31200| 2015-07-09T13:56:48.628-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.632-0400 m30999| 2015-07-09T13:56:48.628-0400 W SHARDING [conn126] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.633-0400 m31200| 2015-07-09T13:56:48.628-0400 W SHARDING [conn34] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.645-0400 m31200| 2015-07-09T13:56:48.628-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.645-0400 m31200| 2015-07-09T13:56:48.628-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.645-0400 m31200| 2015-07-09T13:56:48.629-0400 I SHARDING [conn34] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' unlocked. 
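The second failure mode in this burst, "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", is what a late winner sees: by the time it finally acquires the lock and refreshes its metadata, an earlier split has already replaced the full-range chunk, so the boundaries named in the request no longer exist. The authoritative boundaries live on the config servers and can be inspected through either mongos; a small sketch, with the namespace taken from the log:

    // List the current chunk ranges for db19.coll19 as recorded on the
    // config servers; a split request must name one of these ranges.
    db.getSiblingDB("config").chunks
        .find({ ns: "db19.coll19" }, { min: 1, max: 1, shard: 1, lastmod: 1 })
        .sort({ min: 1 })
        .forEach(printjson);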
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.645-0400 m31200| 2015-07-09T13:56:48.629-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.646-0400 m30998| 2015-07-09T13:56:48.629-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.646-0400 m30999| 2015-07-09T13:56:48.633-0400 I NETWORK [conn117] end connection 127.0.0.1:62983 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.646-0400 m31200| 2015-07-09T13:56:48.633-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.646-0400 m31200| 2015-07-09T13:56:48.633-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.647-0400 m31200| 2015-07-09T13:56:48.633-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.647-0400 m31200| 2015-07-09T13:56:48.634-0400 I SHARDING [conn62] could not acquire lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.647-0400 m31200| 2015-07-09T13:56:48.634-0400 I SHARDING [conn62] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.648-0400 m31200| 2015-07-09T13:56:48.634-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.649-0400 m30998| 2015-07-09T13:56:48.635-0400 W SHARDING [conn121] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.650-0400 m30998| 2015-07-09T13:56:48.636-0400 I NETWORK [conn126] end connection 127.0.0.1:63000 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.650-0400 m31200| 2015-07-09T13:56:48.635-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e0d5a107a5b9c0daeb [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.650-0400 m31200| 2015-07-09T13:56:48.636-0400 I SHARDING [conn47] remotely refreshing metadata for db19.coll19 based on current shard version 1|3||559eb5dfca4787b9985d1c4d, current metadata version is 1|3||559eb5dfca4787b9985d1c4d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.651-0400 m31200| 2015-07-09T13:56:48.637-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.651-0400 m31200| 2015-07-09T13:56:48.641-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.652-0400 m30998| 2015-07-09T13:56:48.641-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.661-0400 m31200| 2015-07-09T13:56:48.642-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.662-0400 m30998| 2015-07-09T13:56:48.642-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.662-0400 m31200| 2015-07-09T13:56:48.643-0400 I SHARDING [conn47] metadata of collection db19.coll19 already up to date (shard version : 1|3||559eb5dfca4787b9985d1c4d, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.662-0400 m31200| 2015-07-09T13:56:48.643-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.662-0400 m31200| 2015-07-09T13:56:48.643-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.663-0400 m30998| 2015-07-09T13:56:48.644-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.663-0400 m31200| 2015-07-09T13:56:48.652-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.663-0400 m30999| 2015-07-09T13:56:48.655-0400 I NETWORK [conn122] end connection 127.0.0.1:62992 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.663-0400 m31200| 2015-07-09T13:56:48.661-0400 I SHARDING [conn65] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.664-0400 m31200| 2015-07-09T13:56:48.662-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.664-0400 m31200| 2015-07-09T13:56:48.663-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.671-0400 m31200| 2015-07-09T13:56:48.667-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.671-0400 m30999| 2015-07-09T13:56:48.670-0400 I NETWORK [conn120] end connection 127.0.0.1:62988 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.672-0400 m31200| 2015-07-09T13:56:48.672-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e0d5a107a5b9c0daed [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.672-0400 m31200| 2015-07-09T13:56:48.672-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.673-0400 m31200| 2015-07-09T13:56:48.672-0400 I SHARDING [conn47] remotely refreshing metadata for db19.coll19 based on current shard version 1|3||559eb5dfca4787b9985d1c4d, current metadata version is 1|3||559eb5dfca4787b9985d1c4d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.676-0400 m31200| 2015-07-09T13:56:48.675-0400 I SHARDING [conn65] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.676-0400 m31200| 2015-07-09T13:56:48.675-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.677-0400 m31200| 2015-07-09T13:56:48.675-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.677-0400 m31200| 2015-07-09T13:56:48.676-0400 W SHARDING [conn65] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.677-0400 m31200| 2015-07-09T13:56:48.676-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.678-0400 m30998| 2015-07-09T13:56:48.677-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.678-0400 m30998| 2015-07-09T13:56:48.678-0400 W SHARDING [conn120] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.683-0400 m31200| 2015-07-09T13:56:48.681-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.683-0400 m31200| 2015-07-09T13:56:48.681-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.685-0400 m30998| 2015-07-09T13:56:48.681-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.685-0400 m30998| 2015-07-09T13:56:48.682-0400 I NETWORK [conn120] end connection 127.0.0.1:62990 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.685-0400 m30999| 2015-07-09T13:56:48.682-0400 I NETWORK [conn119] end connection 127.0.0.1:62987 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.685-0400 m31200| 2015-07-09T13:56:48.683-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.686-0400 m30998| 2015-07-09T13:56:48.684-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.690-0400 m30999| 2015-07-09T13:56:48.686-0400 I NETWORK [conn118] end connection 127.0.0.1:62986 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.697-0400 m31200| 2015-07-09T13:56:48.692-0400 I SHARDING [conn47] metadata of collection db19.coll19 already up to date (shard version : 1|3||559eb5dfca4787b9985d1c4d, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.697-0400 m31200| 2015-07-09T13:56:48.692-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.697-0400 m31200| 2015-07-09T13:56:48.692-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.698-0400 m30998| 2015-07-09T13:56:48.693-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.752-0400 m30999| 2015-07-09T13:56:48.699-0400 I NETWORK [conn126] end connection 127.0.0.1:63001 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.752-0400 m30998| 2015-07-09T13:56:48.704-0400 I NETWORK [conn121] end connection 127.0.0.1:62991 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.752-0400 m30998| 2015-07-09T13:56:48.706-0400 I NETWORK [conn124] end connection 127.0.0.1:62995 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.753-0400 m31200| 2015-07-09T13:56:48.711-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.753-0400 m31200| 2015-07-09T13:56:48.711-0400 W SHARDING [conn47] possible low cardinality key detected in db19.coll19 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.753-0400 m31200| 2015-07-09T13:56:48.711-0400 I SHARDING [conn34] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.753-0400 m31200| 2015-07-09T13:56:48.711-0400 W SHARDING [conn34] possible low cardinality key detected in db19.coll19 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.753-0400 m31200| 2015-07-09T13:56:48.712-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.781-0400 m31200| 2015-07-09T13:56:48.712-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.781-0400 m31200| 2015-07-09T13:56:48.712-0400 I SHARDING [conn85] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.781-0400 m31200| 2015-07-09T13:56:48.712-0400 W SHARDING [conn85] possible low cardinality key detected in db19.coll19 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:48.782-0400 m31200| 2015-07-09T13:56:48.712-0400 W SHARDING [conn85] possible low cardinality key detected in db19.coll19 - key is { tid: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.782-0400 m31200| 2015-07-09T13:56:48.713-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.782-0400 m31200| 2015-07-09T13:56:48.713-0400 W SHARDING [conn62] possible low cardinality key detected in db19.coll19 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.782-0400 m31200| 2015-07-09T13:56:48.713-0400 W SHARDING [conn62] possible low cardinality key detected in db19.coll19 - key is { tid: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.782-0400 m30998| 2015-07-09T13:56:48.714-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.783-0400 m31200| 2015-07-09T13:56:48.713-0400 I SHARDING [conn34] could not acquire lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.783-0400 m31200| 2015-07-09T13:56:48.713-0400 I SHARDING [conn34] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.783-0400 m31200| 2015-07-09T13:56:48.713-0400 W SHARDING [conn34] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.784-0400 m31200| 2015-07-09T13:56:48.713-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.784-0400 m31200| 2015-07-09T13:56:48.715-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.784-0400 m30998| 2015-07-09T13:56:48.715-0400 W SHARDING [conn119] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.785-0400 m31200| 2015-07-09T13:56:48.715-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. 
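The "possible low cardinality key detected" warnings are mongod pointing out that this shard key barely subdivides: the key is the worker thread id, which here only takes the values 0 through 19, so a chunk whose range covers a single tid (the warnings name { tid: 0.0 } and { tid: 19.0 }, the two extremes) can never be split again. A quick cardinality check in the shell, assuming the same namespace:

    // With worker threads numbered 0..19, "tid" has about 20 distinct
    // values; chunks bounded by a single tid value are already as
    // small as this key allows.
    var n = db.getSiblingDB("db19").coll19.distinct("tid").length;
    print("distinct tid values: " + n);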
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.785-0400 m30999| 2015-07-09T13:56:48.716-0400 I NETWORK [conn125] end connection 127.0.0.1:62999 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.785-0400 m31200| 2015-07-09T13:56:48.717-0400 I SHARDING [conn62] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.785-0400 m31200| 2015-07-09T13:56:48.718-0400 W SHARDING [conn62] possible low cardinality key detected in db19.coll19 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.785-0400 m31200| 2015-07-09T13:56:48.718-0400 W SHARDING [conn62] possible low cardinality key detected in db19.coll19 - key is { tid: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.786-0400 m31200| 2015-07-09T13:56:48.718-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.787-0400 m31200| 2015-07-09T13:56:48.720-0400 W SHARDING [conn62] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.787-0400 m30998| 2015-07-09T13:56:48.721-0400 I NETWORK [conn119] end connection 127.0.0.1:62985 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.787-0400 m30998| 2015-07-09T13:56:48.721-0400 W SHARDING [conn122] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.787-0400 m31200| 2015-07-09T13:56:48.722-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e0d5a107a5b9c0daee [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.788-0400 m31200| 2015-07-09T13:56:48.722-0400 I SHARDING [conn47] remotely refreshing metadata for db19.coll19 based on current shard version 1|3||559eb5dfca4787b9985d1c4d, current metadata version is 1|3||559eb5dfca4787b9985d1c4d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.788-0400 m30998| 2015-07-09T13:56:48.723-0400 I NETWORK [conn117] end connection 127.0.0.1:62982 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.788-0400 m30998| 2015-07-09T13:56:48.726-0400 I NETWORK [conn122] end connection 127.0.0.1:62993 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.788-0400 m30998| 
2015-07-09T13:56:48.726-0400 W SHARDING [conn123] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.789-0400 m31200| 2015-07-09T13:56:48.726-0400 W SHARDING [conn85] could not acquire collection lock for db19.coll19 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db19.coll19 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.789-0400 m31200| 2015-07-09T13:56:48.732-0400 I SHARDING [conn47] metadata of collection db19.coll19 already up to date (shard version : 1|3||559eb5dfca4787b9985d1c4d, took 3ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.789-0400 m31200| 2015-07-09T13:56:48.732-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.789-0400 m30998| 2015-07-09T13:56:48.733-0400 I NETWORK [conn123] end connection 127.0.0.1:62994 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.789-0400 m31200| 2015-07-09T13:56:48.733-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.790-0400 m30998| 2015-07-09T13:56:48.735-0400 W SHARDING [conn125] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.790-0400 m30999| 2015-07-09T13:56:48.756-0400 I NETWORK [conn123] end connection 127.0.0.1:62997 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.790-0400 m30999| 2015-07-09T13:56:48.759-0400 I NETWORK [conn124] end connection 127.0.0.1:62998 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.790-0400 m30998| 2015-07-09T13:56:48.767-0400 I NETWORK [conn125] end connection 127.0.0.1:62996 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.791-0400 m31200| 2015-07-09T13:56:48.780-0400 I SHARDING [conn47] request split points lookup for chunk db19.coll19 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.791-0400 m31200| 2015-07-09T13:56:48.783-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.791-0400 m31200| 2015-07-09T13:56:48.788-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e0d5a107a5b9c0daf0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.791-0400 m31200| 2015-07-09T13:56:48.788-0400 I SHARDING [conn47] remotely refreshing metadata for db19.coll19 based on current shard version 1|3||559eb5dfca4787b9985d1c4d, current metadata version is 1|3||559eb5dfca4787b9985d1c4d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.791-0400 m31200| 2015-07-09T13:56:48.789-0400 I SHARDING [conn47] metadata of collection db19.coll19 already up to date (shard version : 1|3||559eb5dfca4787b9985d1c4d, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.792-0400 m31200| 2015-07-09T13:56:48.789-0400 W SHARDING [conn47] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.792-0400 m31200| 2015-07-09T13:56:48.790-0400 I SHARDING [conn47] distributed lock 'db19.coll19/bs-osx108-8:31200:1436464537:809424560' unlocked. 
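The "distributed lock ... acquired, ts : ..." / "... unlocked" pairs trace each lock's lifecycle explicitly. In this 3.1.x build the lock is simply a document in config.locks keyed by the namespace, so the contention above can be watched live; a sketch, assuming that era's config.locks schema (state 2 means held, and the "who"/"why" fields identify the holder and the operation):

    // Show who holds (or last held) the db19.coll19 collection lock.
    db.getSiblingDB("config").locks
        .find({ _id: "db19.coll19" })
        .forEach(printjson);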
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.792-0400 m30998| 2015-07-09T13:56:48.790-0400 W SHARDING [conn118] splitChunk failed - cmd: { splitChunk: "db19.coll19", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs1", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 8.0 }, { tid: 10.0 }, { tid: 11.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5dfca4787b9985d1c4d') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.793-0400 m30999| 2015-07-09T13:56:48.793-0400 I NETWORK [conn121] end connection 127.0.0.1:62989 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.794-0400 m30998| 2015-07-09T13:56:48.794-0400 I NETWORK [conn118] end connection 127.0.0.1:62984 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.811-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.811-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.812-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.812-0400 jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js: Workload completed in 669 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.812-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.812-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.812-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.812-0400 m30999| 2015-07-09T13:56:48.812-0400 I COMMAND [conn1] DROP: db19.coll19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.812-0400 m30999| 2015-07-09T13:56:48.812-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:48.812-0400-559eb5e0ca4787b9985d1c4f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464608812), what: "dropCollection.start", ns: "db19.coll19", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.869-0400 m30999| 2015-07-09T13:56:48.868-0400 I SHARDING [conn1] distributed lock 'db19.coll19/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e0ca4787b9985d1c50 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.869-0400 m31100| 2015-07-09T13:56:48.869-0400 I COMMAND [conn38] CMD: drop db19.coll19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.870-0400 m31200| 2015-07-09T13:56:48.870-0400 I COMMAND [conn64] CMD: drop db19.coll19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.874-0400 m31201| 2015-07-09T13:56:48.873-0400 I COMMAND [repl writer worker 9] CMD: drop db19.coll19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.874-0400 m31202| 2015-07-09T13:56:48.874-0400 I COMMAND [repl writer worker 13] CMD: drop db19.coll19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.927-0400 m31200| 2015-07-09T13:56:48.926-0400 I SHARDING [conn64] remotely refreshing metadata for db19.coll19 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eb5dfca4787b9985d1c4d, current metadata version is 1|3||559eb5dfca4787b9985d1c4d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.928-0400 m31200| 2015-07-09T13:56:48.928-0400 W SHARDING [conn64] no chunks found when reloading db19.coll19, previous version was 0|0||559eb5dfca4787b9985d1c4d, this is a drop 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.928-0400 m31200| 2015-07-09T13:56:48.928-0400 I SHARDING [conn64] dropping metadata for db19.coll19 at shard version 1|3||559eb5dfca4787b9985d1c4d, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.929-0400 m30999| 2015-07-09T13:56:48.928-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:48.928-0400-559eb5e0ca4787b9985d1c51", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464608928), what: "dropCollection", ns: "db19.coll19", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:48.982-0400 m30999| 2015-07-09T13:56:48.982-0400 I SHARDING [conn1] distributed lock 'db19.coll19/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.038-0400 m30999| 2015-07-09T13:56:49.038-0400 I COMMAND [conn1] DROP DATABASE: db19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.038-0400 m30999| 2015-07-09T13:56:49.038-0400 I SHARDING [conn1] DBConfig::dropDatabase: db19 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.038-0400 m30999| 2015-07-09T13:56:49.038-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:49.038-0400-559eb5e1ca4787b9985d1c52", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464609038), what: "dropDatabase.start", ns: "db19", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.144-0400 m30999| 2015-07-09T13:56:49.143-0400 I SHARDING [conn1] DBConfig::dropDatabase: db19 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.144-0400 m31200| 2015-07-09T13:56:49.144-0400 I COMMAND [conn66] dropDatabase db19 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.145-0400 m31200| 2015-07-09T13:56:49.144-0400 I COMMAND [conn66] dropDatabase db19 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.145-0400 m30999| 2015-07-09T13:56:49.145-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:49.145-0400-559eb5e1ca4787b9985d1c53", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464609145), what: "dropDatabase", ns: "db19", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.146-0400 m31202| 2015-07-09T13:56:49.145-0400 I COMMAND [repl writer worker 0] dropDatabase db19 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.146-0400 m31201| 2015-07-09T13:56:49.145-0400 I COMMAND [repl writer worker 6] dropDatabase db19 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.146-0400 m31202| 2015-07-09T13:56:49.145-0400 I COMMAND [repl writer worker 0] dropDatabase db19 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.146-0400 m31201| 2015-07-09T13:56:49.145-0400 I COMMAND [repl writer worker 6] dropDatabase db19 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.238-0400 m31100| 2015-07-09T13:56:49.237-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.241-0400 m31101| 2015-07-09T13:56:49.241-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.241-0400 m31102| 2015-07-09T13:56:49.241-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.274-0400 m31200| 2015-07-09T13:56:49.273-0400 I COMMAND [conn1] CMD: 
drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.276-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.276-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.276-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.276-0400 jstests/concurrency/fsm_workloads/explain_aggregate.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.277-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.277-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.277-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.278-0400 m31202| 2015-07-09T13:56:49.277-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.278-0400 m31201| 2015-07-09T13:56:49.277-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.282-0400 m30999| 2015-07-09T13:56:49.282-0400 I SHARDING [conn1] distributed lock 'db20/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e1ca4787b9985d1c54 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.286-0400 m30999| 2015-07-09T13:56:49.286-0400 I SHARDING [conn1] Placing [db20] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.286-0400 m30999| 2015-07-09T13:56:49.286-0400 I SHARDING [conn1] Enabling sharding for database [db20] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.340-0400 m30999| 2015-07-09T13:56:49.340-0400 I SHARDING [conn1] distributed lock 'db20/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.364-0400 m31200| 2015-07-09T13:56:49.364-0400 I INDEX [conn71] build index on: db20.coll20 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db20.coll20" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.364-0400 m31200| 2015-07-09T13:56:49.364-0400 I INDEX [conn71] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.373-0400 m31200| 2015-07-09T13:56:49.373-0400 I INDEX [conn71] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.376-0400 m30999| 2015-07-09T13:56:49.376-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db20.coll20", key: { j: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.380-0400 m30999| 2015-07-09T13:56:49.379-0400 I SHARDING [conn1] distributed lock 'db20.coll20/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e1ca4787b9985d1c55 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.381-0400 m30999| 2015-07-09T13:56:49.380-0400 I SHARDING [conn1] enable sharding on: db20.coll20 with shard key: { j: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.383-0400 m30999| 2015-07-09T13:56:49.381-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:49.381-0400-559eb5e1ca4787b9985d1c56", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464609381), what: "shardCollection.start", ns: "db20.coll20", details: { shardKey: { j: 1.0 }, collection: "db20.coll20", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.385-0400 m31201| 2015-07-09T13:56:49.385-0400 I INDEX [repl writer worker 7] build index on: db20.coll20 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db20.coll20" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.385-0400 m31201| 2015-07-09T13:56:49.385-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.390-0400 m31201| 2015-07-09T13:56:49.389-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.392-0400 m31202| 2015-07-09T13:56:49.392-0400 I INDEX [repl writer worker 9] build index on: db20.coll20 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db20.coll20" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.392-0400 m31202| 2015-07-09T13:56:49.392-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.395-0400 m31202| 2015-07-09T13:56:49.395-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.435-0400 m30999| 2015-07-09T13:56:49.435-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db20.coll20 using new epoch 559eb5e1ca4787b9985d1c57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.489-0400 m30999| 2015-07-09T13:56:49.489-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db20.coll20: 0ms sequenceNumber: 93 version: 1|0||559eb5e1ca4787b9985d1c57 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.545-0400 m30999| 2015-07-09T13:56:49.544-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db20.coll20: 0ms sequenceNumber: 94 version: 1|0||559eb5e1ca4787b9985d1c57 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.547-0400 m31200| 2015-07-09T13:56:49.546-0400 I SHARDING [conn83] remotely refreshing metadata for db20.coll20 with requested shard version 1|0||559eb5e1ca4787b9985d1c57, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.548-0400 m31200| 2015-07-09T13:56:49.548-0400 I SHARDING [conn83] collection db20.coll20 was previously unsharded, new metadata loaded with shard version 1|0||559eb5e1ca4787b9985d1c57 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.549-0400 m31200| 2015-07-09T13:56:49.548-0400 I SHARDING [conn83] collection version was loaded at version 1|0||559eb5e1ca4787b9985d1c57, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.549-0400 m30999| 2015-07-09T13:56:49.549-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:49.549-0400-559eb5e1ca4787b9985d1c58", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464609549), what: "shardCollection", ns: "db20.coll20", details: { version: "1|0||559eb5e1ca4787b9985d1c57" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.603-0400 m30999| 2015-07-09T13:56:49.602-0400 I SHARDING [conn1] distributed lock 'db20.coll20/bs-osx108-8:30999:1436464534:16807' unlocked. 
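With db19 fully torn down, the harness provisions the next workload's namespace: it places db20 on test-rs1, enables sharding, builds the { j: 1.0 } index (m31201 and m31202 replicate the build), then runs shardcollection, creating the single initial chunk under epoch 559eb5e1ca4787b9985d1c57. Typed by hand against a mongos, the equivalent steps would be roughly the following sketch of standard shell helpers, not the harness's literal code:

    // Reproduce the db20 setup sequence from the log by hand.
    sh.enableSharding("db20");                            // "Enabling sharding for database [db20]"
    db.getSiblingDB("db20").coll20.createIndex({ j: 1 }); // "build index on: db20.coll20 ... key: { j: 1.0 }"
    sh.shardCollection("db20.coll20", { j: 1 });          // "CMD: shardcollection" -> one initial chunk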
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.603-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.677-0400 m30999| 2015-07-09T13:56:49.674-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63010 #127 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.715-0400 m30998| 2015-07-09T13:56:49.714-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63011 #127 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.715-0400 m30999| 2015-07-09T13:56:49.714-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63012 #128 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.723-0400 m30998| 2015-07-09T13:56:49.722-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63014 #128 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.723-0400 m30998| 2015-07-09T13:56:49.723-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63016 #129 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.724-0400 m30999| 2015-07-09T13:56:49.723-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63013 #129 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.726-0400 m30999| 2015-07-09T13:56:49.726-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63015 #130 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.734-0400 m30998| 2015-07-09T13:56:49.726-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63017 #130 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.734-0400 m30998| 2015-07-09T13:56:49.726-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63018 #131 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.739-0400 m30999| 2015-07-09T13:56:49.739-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63019 #131 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.746-0400 setting random seed: 8543632640503
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.746-0400 setting random seed: 4528340888209
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.746-0400 setting random seed: 1181097002699
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.746-0400 setting random seed: 6243897248059
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.747-0400 setting random seed: 2833855804055
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.747-0400 setting random seed: 697333621792
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.748-0400 setting random seed: 9460613331757
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.756-0400 setting random seed: 86186924017
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.757-0400 m30998| 2015-07-09T13:56:49.757-0400 I SHARDING [conn129] ChunkManager: time to load chunks for db20.coll20: 0ms sequenceNumber: 23 version: 1|0||559eb5e1ca4787b9985d1c57 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.759-0400 setting random seed: 7825343576259
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.764-0400 setting random seed: 549547444097
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.813-0400 m31200| 2015-07-09T13:56:49.812-0400 I SHARDING [conn62] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.814-0400 m31200| 2015-07-09T13:56:49.814-0400 I SHARDING [conn62] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.816-0400 m31200| 2015-07-09T13:56:49.816-0400 I SHARDING [conn62] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e1d5a107a5b9c0daf2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.817-0400 m31200| 2015-07-09T13:56:49.816-0400 I SHARDING [conn62] remotely refreshing metadata for db20.coll20 based on current shard version 1|0||559eb5e1ca4787b9985d1c57, current metadata version is 1|0||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.820-0400 m31200| 2015-07-09T13:56:49.820-0400 I SHARDING [conn62] metadata of collection db20.coll20 already up to date (shard version : 1|0||559eb5e1ca4787b9985d1c57, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.820-0400 m31200| 2015-07-09T13:56:49.820-0400 I SHARDING [conn62] splitChunk accepted at version 1|0||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.821-0400 m31200| 2015-07-09T13:56:49.820-0400 I SHARDING [conn85] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.821-0400 m31200| 2015-07-09T13:56:49.820-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.821-0400 m31200| 2015-07-09T13:56:49.820-0400 I SHARDING [conn84] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.821-0400 m31200| 2015-07-09T13:56:49.821-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.822-0400 m31200| 2015-07-09T13:56:49.822-0400 I SHARDING [conn85] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.822-0400 m31200| 2015-07-09T13:56:49.822-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.824-0400 m31200| 2015-07-09T13:56:49.823-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.824-0400 m31200| 2015-07-09T13:56:49.823-0400 W SHARDING [conn84] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.824-0400 m30999| 2015-07-09T13:56:49.824-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.825-0400 m31200| 2015-07-09T13:56:49.824-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:49.824-0400-559eb5e1d5a107a5b9c0daf3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464609824), what: "multi-split", ns: "db20.coll20", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 1, of: 3, chunk: { min: { j: MinKey }, max: { j: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb5e1ca4787b9985d1c57') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.825-0400 m31200| 2015-07-09T13:56:49.824-0400 W SHARDING [conn85] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.825-0400 m31200| 2015-07-09T13:56:49.824-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.825-0400 m30999| 2015-07-09T13:56:49.824-0400 W SHARDING [conn129] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.826-0400 m31200| 2015-07-09T13:56:49.824-0400 I SHARDING [conn47] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.826-0400 m30998| 2015-07-09T13:56:49.825-0400 W SHARDING [conn131] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.827-0400 m31200| 2015-07-09T13:56:49.826-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.828-0400 m31200| 2015-07-09T13:56:49.827-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.828-0400 m31200| 2015-07-09T13:56:49.828-0400 W SHARDING [conn47] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.829-0400 m30998| 2015-07-09T13:56:49.828-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.831-0400 m31200| 2015-07-09T13:56:49.829-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.832-0400 m30999| 2015-07-09T13:56:49.829-0400 W SHARDING [conn131] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.841-0400 m31200| 2015-07-09T13:56:49.841-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.842-0400 m31200| 2015-07-09T13:56:49.842-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.843-0400 m31200| 2015-07-09T13:56:49.842-0400 I SHARDING [conn47] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.844-0400 m31200| 2015-07-09T13:56:49.843-0400 I SHARDING [conn47] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.844-0400 m31200| 2015-07-09T13:56:49.844-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.845-0400 m30999| 2015-07-09T13:56:49.844-0400 W SHARDING [conn131] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.846-0400 m31200| 2015-07-09T13:56:49.845-0400 W SHARDING [conn47] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.846-0400 m30998| 2015-07-09T13:56:49.846-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.849-0400 m30998| 2015-07-09T13:56:49.848-0400 I SHARDING [conn130] ChunkManager: time to load chunks for db20.coll20: 0ms sequenceNumber: 24 version: 1|3||559eb5e1ca4787b9985d1c57 based on: 1|0||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.858-0400 m31200| 2015-07-09T13:56:49.857-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.859-0400 m31200| 2015-07-09T13:56:49.858-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.860-0400 m31200| 2015-07-09T13:56:49.858-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.860-0400 m31200| 2015-07-09T13:56:49.859-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.862-0400 m31200| 2015-07-09T13:56:49.861-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.863-0400 m30999| 2015-07-09T13:56:49.861-0400 W SHARDING [conn129] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.863-0400 m31200| 2015-07-09T13:56:49.861-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.864-0400 m30999| 2015-07-09T13:56:49.864-0400 W SHARDING [conn128] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.875-0400 m31200| 2015-07-09T13:56:49.875-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:49.875-0400-559eb5e1d5a107a5b9c0daf4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464609875), what: "multi-split", ns: "db20.coll20", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 2, of: 3, chunk: { min: { j: 0.0 }, max: { j: 4.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb5e1ca4787b9985d1c57') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.878-0400 m31200| 2015-07-09T13:56:49.878-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.879-0400 m31200| 2015-07-09T13:56:49.879-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.881-0400 m31200| 2015-07-09T13:56:49.880-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.881-0400 m31200| 2015-07-09T13:56:49.880-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.882-0400 m30999| 2015-07-09T13:56:49.880-0400 W SHARDING [conn128] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.883-0400 m31200| 2015-07-09T13:56:49.881-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.883-0400 m31200| 2015-07-09T13:56:49.883-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.884-0400 m30999| 2015-07-09T13:56:49.883-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.899-0400 m31200| 2015-07-09T13:56:49.897-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.899-0400 m31200| 2015-07-09T13:56:49.897-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.900-0400 m31200| 2015-07-09T13:56:49.898-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.901-0400 m31200| 2015-07-09T13:56:49.898-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.903-0400 m30999| 2015-07-09T13:56:49.900-0400 W SHARDING [conn128] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.903-0400 m31200| 2015-07-09T13:56:49.900-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.905-0400 m31200| 2015-07-09T13:56:49.900-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.905-0400 m30999| 2015-07-09T13:56:49.900-0400 W SHARDING [conn131] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.920-0400 m31200| 2015-07-09T13:56:49.920-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.921-0400 m31200| 2015-07-09T13:56:49.920-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.921-0400 m31200| 2015-07-09T13:56:49.920-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.922-0400 m31200| 2015-07-09T13:56:49.922-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.923-0400 m31200| 2015-07-09T13:56:49.922-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.923-0400 m30999| 2015-07-09T13:56:49.922-0400 W SHARDING [conn129] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.924-0400 m31200| 2015-07-09T13:56:49.923-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.925-0400 m30999| 2015-07-09T13:56:49.924-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.928-0400 m31200| 2015-07-09T13:56:49.927-0400 I SHARDING [conn62] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:49.927-0400-559eb5e1d5a107a5b9c0daf5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62861", time: new Date(1436464609927), what: "multi-split", ns: "db20.coll20", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 3, of: 3, chunk: { min: { j: 4.0 }, max: { j: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb5e1ca4787b9985d1c57') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.941-0400 m31200| 2015-07-09T13:56:49.939-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.943-0400 m31200| 2015-07-09T13:56:49.942-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.943-0400 m31200| 2015-07-09T13:56:49.943-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.944-0400 m31200| 2015-07-09T13:56:49.943-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.944-0400 m30999| 2015-07-09T13:56:49.943-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.945-0400 m31200| 2015-07-09T13:56:49.944-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.946-0400 m31200| 2015-07-09T13:56:49.946-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.947-0400 m30999| 2015-07-09T13:56:49.946-0400 W SHARDING [conn131] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.964-0400 m31200| 2015-07-09T13:56:49.963-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.965-0400 m31200| 2015-07-09T13:56:49.964-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.965-0400 m31200| 2015-07-09T13:56:49.964-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.966-0400 m31200| 2015-07-09T13:56:49.965-0400 I SHARDING [conn84] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.966-0400 m31200| 2015-07-09T13:56:49.965-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.967-0400 m31200| 2015-07-09T13:56:49.966-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.967-0400 m31200| 2015-07-09T13:56:49.966-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.968-0400 m30999| 2015-07-09T13:56:49.966-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.968-0400 m31200| 2015-07-09T13:56:49.967-0400 W SHARDING [conn84] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.969-0400 m30999| 2015-07-09T13:56:49.967-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.969-0400 m31200| 2015-07-09T13:56:49.968-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.969-0400 m30999| 2015-07-09T13:56:49.968-0400 W SHARDING [conn129] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.980-0400 m31200| 2015-07-09T13:56:49.979-0400 I SHARDING [conn62] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.980-0400 m31200| 2015-07-09T13:56:49.979-0400 I COMMAND [conn62] command db20.coll20 command: splitChunk { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 3945 } } } protocol:op_command 165ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.981-0400 m30998| 2015-07-09T13:56:49.980-0400 I SHARDING [conn127] autosplitted db20.coll20 shard: ns: db20.coll20, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { j: MinKey }, max: { j: MaxKey } into 3 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.985-0400 m31200| 2015-07-09T13:56:49.985-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.987-0400 m31200| 2015-07-09T13:56:49.986-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.988-0400 m31200| 2015-07-09T13:56:49.986-0400 I SHARDING [conn84] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.990-0400 m31200| 2015-07-09T13:56:49.988-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.990-0400 m31200| 2015-07-09T13:56:49.988-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e1d5a107a5b9c0daf6
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.990-0400 m31200| 2015-07-09T13:56:49.988-0400 I SHARDING [conn64] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.991-0400 m31200| 2015-07-09T13:56:49.989-0400 W SHARDING [conn84] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.991-0400 m31200| 2015-07-09T13:56:49.989-0400 I SHARDING [conn64] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.991-0400 m31200| 2015-07-09T13:56:49.990-0400 W SHARDING [conn64] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.992-0400 m30999| 2015-07-09T13:56:49.989-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.992-0400 m31200| 2015-07-09T13:56:49.990-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
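Everything from "Using 10 threads" to this point is one autosplit race, and the warnings in it are expected: ten workload threads inserting through two mongos routers (m30998, m30999) each make the test-rs1 primary (m31200) recompute split points for the single { : MinKey } -->> { : MaxKey } chunk, and each resulting splitChunk request must take the distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560'. Exactly one request (conn62's) wins, performs the three-way multi-split at j = 0 and j = 4, and commits version 1|3; every concurrent attempt fails with code 125 because the collection lock is taken. The "autosplitted db20.coll20 ... into 3 (splitThreshold 921)" line is the successful outcome, and once 1|3 is committed, stragglers still describing the old full-range chunk start getting "splitChunk cannot find chunk ... the chunk boundaries may be stale" instead. For comparison, a manual split of the same chunk could be requested through a mongos with the stock shell helper (a sketch only; collection name and split point taken from the log):

    // sh.splitAt() drives the same splitChunk path the autosplitter uses
    sh.splitAt("db20.coll20", { j: 4 });   // split the chunk containing j = 4 at j = 4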
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:49.992-0400 m30999| 2015-07-09T13:56:49.991-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.007-0400 m31200| 2015-07-09T13:56:50.006-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.007-0400 m31200| 2015-07-09T13:56:50.007-0400 I SHARDING [conn84] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.009-0400 m31200| 2015-07-09T13:56:50.007-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.010-0400 m31200| 2015-07-09T13:56:50.008-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.011-0400 m31200| 2015-07-09T13:56:50.008-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.012-0400 m31200| 2015-07-09T13:56:50.009-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.012-0400 m31200| 2015-07-09T13:56:50.009-0400 I SHARDING [conn84] could not acquire lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.012-0400 m31200| 2015-07-09T13:56:50.009-0400 I SHARDING [conn84] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.013-0400 m31200| 2015-07-09T13:56:50.009-0400 W SHARDING [conn84] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.013-0400 m31200| 2015-07-09T13:56:50.009-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e2d5a107a5b9c0daf7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.013-0400 m31200| 2015-07-09T13:56:50.009-0400 I SHARDING [conn64] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.013-0400 m30999| 2015-07-09T13:56:50.010-0400 W SHARDING [conn129] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.014-0400 m31200| 2015-07-09T13:56:50.011-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.014-0400 m30999| 2015-07-09T13:56:50.011-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.015-0400 m31200| 2015-07-09T13:56:50.011-0400 I SHARDING [conn64] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.015-0400 m31200| 2015-07-09T13:56:50.012-0400 W SHARDING [conn64] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.015-0400 m31200| 2015-07-09T13:56:50.012-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.015-0400 m30999| 2015-07-09T13:56:50.013-0400 W SHARDING [conn128] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.026-0400 m31200| 2015-07-09T13:56:50.025-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.027-0400 m31200| 2015-07-09T13:56:50.027-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.030-0400 m31200| 2015-07-09T13:56:50.029-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e2d5a107a5b9c0daf9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.030-0400 m31200| 2015-07-09T13:56:50.029-0400 I SHARDING [conn64] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.030-0400 m31200| 2015-07-09T13:56:50.029-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.033-0400 m31200| 2015-07-09T13:56:50.030-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.033-0400 m31200| 2015-07-09T13:56:50.032-0400 I SHARDING [conn64] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.034-0400 m31200| 2015-07-09T13:56:50.032-0400 I SHARDING [conn84] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.035-0400 m31200| 2015-07-09T13:56:50.032-0400 W SHARDING [conn64] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.036-0400 m31200| 2015-07-09T13:56:50.033-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.037-0400 m30999| 2015-07-09T13:56:50.033-0400 W SHARDING [conn131] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.037-0400 m31200| 2015-07-09T13:56:50.033-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.038-0400 m30999| 2015-07-09T13:56:50.034-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.038-0400 m31200| 2015-07-09T13:56:50.034-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 32.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.039-0400 m31200| 2015-07-09T13:56:50.036-0400 I SHARDING [conn63] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e2d5a107a5b9c0dafa
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.039-0400 m31200| 2015-07-09T13:56:50.036-0400 I SHARDING [conn63] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.039-0400 m31200| 2015-07-09T13:56:50.038-0400 I SHARDING [conn63] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.039-0400 m31200| 2015-07-09T13:56:50.038-0400 W SHARDING [conn63] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.039-0400 m31200| 2015-07-09T13:56:50.038-0400 I SHARDING [conn63] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.040-0400 m30999| 2015-07-09T13:56:50.040-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 32.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.068-0400 m31200| 2015-07-09T13:56:50.067-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.069-0400 m31200| 2015-07-09T13:56:50.069-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.070-0400 m31200| 2015-07-09T13:56:50.069-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.070-0400 m31200| 2015-07-09T13:56:50.070-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.071-0400 m31200| 2015-07-09T13:56:50.070-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.071-0400 m31200| 2015-07-09T13:56:50.071-0400 I SHARDING [conn63] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e2d5a107a5b9c0dafb
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.073-0400 m31200| 2015-07-09T13:56:50.071-0400 I SHARDING [conn63] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.073-0400 m31200| 2015-07-09T13:56:50.071-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.073-0400 m31200| 2015-07-09T13:56:50.072-0400 W SHARDING [conn84] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.074-0400 m31200| 2015-07-09T13:56:50.072-0400 W SHARDING [conn64] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.074-0400 m30999| 2015-07-09T13:56:50.072-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.075-0400 m30999| 2015-07-09T13:56:50.073-0400 W SHARDING [conn131] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.077-0400 m31200| 2015-07-09T13:56:50.076-0400 I SHARDING [conn63] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.077-0400 m31200| 2015-07-09T13:56:50.077-0400 W SHARDING [conn63] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.078-0400 m31200| 2015-07-09T13:56:50.077-0400 I SHARDING [conn63] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.086-0400 m30999| 2015-07-09T13:56:50.077-0400 W SHARDING [conn129] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 38.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.088-0400 m30998| 2015-07-09T13:56:50.088-0400 I NETWORK [conn129] end connection 127.0.0.1:63016 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.096-0400 m31200| 2015-07-09T13:56:50.096-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.096-0400 m31200| 2015-07-09T13:56:50.096-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.098-0400 m31200| 2015-07-09T13:56:50.097-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 36.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.099-0400 m31200| 2015-07-09T13:56:50.097-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 36.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.099-0400 m31200| 2015-07-09T13:56:50.098-0400 I SHARDING [conn63] could not acquire lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.100-0400 m31200| 2015-07-09T13:56:50.098-0400 I SHARDING [conn63] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.100-0400 m31200| 2015-07-09T13:56:50.098-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.101-0400 m30999| 2015-07-09T13:56:50.098-0400 W SHARDING [conn128] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 36.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.101-0400 m31200| 2015-07-09T13:56:50.099-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e2d5a107a5b9c0dafc
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.101-0400 m31200| 2015-07-09T13:56:50.099-0400 I SHARDING [conn64] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.101-0400 m31200| 2015-07-09T13:56:50.100-0400 I SHARDING [conn64] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.101-0400 m31200| 2015-07-09T13:56:50.100-0400 W SHARDING [conn64] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.102-0400 m31200| 2015-07-09T13:56:50.100-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.104-0400 m30999| 2015-07-09T13:56:50.103-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 32.0 }, { j: 34.0 }, { j: 36.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.112-0400 m30998| 2015-07-09T13:56:50.112-0400 I NETWORK [conn128] end connection 127.0.0.1:63014 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.126-0400 m31200| 2015-07-09T13:56:50.120-0400 I SHARDING [conn64] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.133-0400 m30998| 2015-07-09T13:56:50.121-0400 I NETWORK [conn131] end connection 127.0.0.1:63018 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.134-0400 m31200| 2015-07-09T13:56:50.122-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 36.0 }, { j: 38.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.134-0400 m31200| 2015-07-09T13:56:50.123-0400 I SHARDING [conn63] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.134-0400 m31200| 2015-07-09T13:56:50.125-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 36.0 }, { j: 38.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.135-0400 m31200| 2015-07-09T13:56:50.125-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e2d5a107a5b9c0dafe
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.135-0400 m31200| 2015-07-09T13:56:50.125-0400 I SHARDING [conn64] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.136-0400 m31200| 2015-07-09T13:56:50.126-0400 W SHARDING [conn63] could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db20.coll20 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.136-0400 m31200| 2015-07-09T13:56:50.126-0400 I SHARDING [conn64] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.136-0400 m31200| 2015-07-09T13:56:50.126-0400 W SHARDING [conn64] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.137-0400 m30999| 2015-07-09T13:56:50.127-0400 W SHARDING [conn127] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 36.0 }, { j: 38.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db20.coll20 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.137-0400 m31200| 2015-07-09T13:56:50.127-0400 I SHARDING [conn84] request split points lookup for chunk db20.coll20 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.137-0400 m31200| 2015-07-09T13:56:50.127-0400 I SHARDING [conn64] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.138-0400 m30999| 2015-07-09T13:56:50.128-0400 W SHARDING [conn130] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 36.0 }, { j: 38.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.139-0400 m31200| 2015-07-09T13:56:50.128-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 36.0 }, { j: 38.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.139-0400 m30999| 2015-07-09T13:56:50.132-0400 I NETWORK [conn131] end connection 127.0.0.1:63019 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.139-0400 m31200| 2015-07-09T13:56:50.133-0400 I SHARDING [conn84] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e2d5a107a5b9c0daff
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.139-0400 m31200| 2015-07-09T13:56:50.133-0400 I SHARDING [conn84] remotely refreshing metadata for db20.coll20 based on current shard version 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.140-0400 m31200| 2015-07-09T13:56:50.135-0400 I SHARDING [conn84] metadata of collection db20.coll20 already up to date (shard version : 1|3||559eb5e1ca4787b9985d1c57, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.140-0400 m31200| 2015-07-09T13:56:50.135-0400 W SHARDING [conn84] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.140-0400 m31200| 2015-07-09T13:56:50.135-0400 I SHARDING [conn84] distributed lock 'db20.coll20/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.141-0400 m30999| 2015-07-09T13:56:50.136-0400 W SHARDING [conn129] splitChunk failed - cmd: { splitChunk: "db20.coll20", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs1", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 36.0 }, { j: 38.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e1ca4787b9985d1c57') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.142-0400 m30999| 2015-07-09T13:56:50.142-0400 I NETWORK [conn128] end connection 127.0.0.1:63012 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.162-0400 m30998| 2015-07-09T13:56:50.161-0400 I NETWORK [conn130] end connection 127.0.0.1:63017 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.173-0400 m30999| 2015-07-09T13:56:50.173-0400 I NETWORK [conn127] end connection 127.0.0.1:63010 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.173-0400 m30999| 2015-07-09T13:56:50.173-0400 I NETWORK [conn130] end connection 127.0.0.1:63015 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.177-0400 m30999| 2015-07-09T13:56:50.177-0400 I NETWORK [conn129] end connection 127.0.0.1:63013 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.208-0400 m30998| 2015-07-09T13:56:50.207-0400 I NETWORK [conn127] end connection 127.0.0.1:63011 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.208-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.208-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.208-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.208-0400 jstests/concurrency/fsm_workloads/explain_aggregate.js: Workload completed in 604 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.209-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.209-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.209-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.209-0400 m30999| 2015-07-09T13:56:50.208-0400 I COMMAND [conn1] DROP: db20.coll20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.209-0400 m30999| 2015-07-09T13:56:50.208-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:50.208-0400-559eb5e2ca4787b9985d1c59", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464610208), what: "dropCollection.start", ns: "db20.coll20", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.265-0400 m30999| 2015-07-09T13:56:50.265-0400 I SHARDING [conn1] distributed lock 'db20.coll20/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e2ca4787b9985d1c5a
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.266-0400 m31100| 2015-07-09T13:56:50.266-0400 I COMMAND [conn38] CMD: drop db20.coll20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.267-0400 m31200| 2015-07-09T13:56:50.267-0400 I COMMAND [conn84] CMD: drop db20.coll20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.271-0400 m31201| 2015-07-09T13:56:50.271-0400 I COMMAND [repl writer worker 2] CMD: drop db20.coll20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.271-0400 m31202| 2015-07-09T13:56:50.271-0400 I COMMAND [repl writer worker 6] CMD: drop db20.coll20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.323-0400 m31200| 2015-07-09T13:56:50.323-0400 I SHARDING [conn84] remotely refreshing metadata for db20.coll20 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eb5e1ca4787b9985d1c57, current metadata version is 1|3||559eb5e1ca4787b9985d1c57
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.325-0400 m31200| 2015-07-09T13:56:50.325-0400 W SHARDING [conn84] no chunks found when reloading db20.coll20, previous version was 0|0||559eb5e1ca4787b9985d1c57, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.325-0400 m31200| 2015-07-09T13:56:50.325-0400 I SHARDING [conn84] dropping metadata for db20.coll20 at shard version 1|3||559eb5e1ca4787b9985d1c57, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.326-0400 m30999| 2015-07-09T13:56:50.325-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:50.325-0400-559eb5e2ca4787b9985d1c5b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464610325), what: "dropCollection", ns: "db20.coll20", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.379-0400 m30999| 2015-07-09T13:56:50.379-0400 I SHARDING [conn1] distributed lock 'db20.coll20/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.435-0400 m30999| 2015-07-09T13:56:50.434-0400 I COMMAND [conn1] DROP DATABASE: db20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.435-0400 m30999| 2015-07-09T13:56:50.434-0400 I SHARDING [conn1] DBConfig::dropDatabase: db20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.435-0400 m30999| 2015-07-09T13:56:50.435-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:50.435-0400-559eb5e2ca4787b9985d1c5c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464610435), what: "dropDatabase.start", ns: "db20", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.540-0400 m30999| 2015-07-09T13:56:50.540-0400 I SHARDING [conn1] DBConfig::dropDatabase: db20 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.541-0400 m31200| 2015-07-09T13:56:50.541-0400 I COMMAND [conn66] dropDatabase db20 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.541-0400 m31200| 2015-07-09T13:56:50.541-0400 I COMMAND [conn66] dropDatabase db20 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.543-0400 m30999| 2015-07-09T13:56:50.542-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:50.542-0400-559eb5e2ca4787b9985d1c5d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464610542), what: "dropDatabase", ns: "db20", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.543-0400 m31201| 2015-07-09T13:56:50.543-0400 I COMMAND [repl writer worker 9] dropDatabase db20 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.543-0400 m31201| 2015-07-09T13:56:50.543-0400 I COMMAND [repl writer worker 9] dropDatabase db20 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.543-0400 m31202| 2015-07-09T13:56:50.543-0400 I COMMAND [repl writer worker 15] dropDatabase db20 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.543-0400 m31202| 2015-07-09T13:56:50.543-0400 I COMMAND [repl writer worker 15] dropDatabase db20 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.629-0400 m31100| 2015-07-09T13:56:50.628-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.632-0400 m31101| 2015-07-09T13:56:50.632-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.633-0400 m31102| 2015-07-09T13:56:50.632-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.661-0400 m31200| 2015-07-09T13:56:50.661-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.662-0400 m31202| 2015-07-09T13:56:50.662-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.662-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.663-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.663-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.663-0400 jstests/concurrency/fsm_workloads/indexed_insert_text.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.663-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.663-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.663-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.664-0400 m31201| 2015-07-09T13:56:50.664-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.670-0400 m30999| 2015-07-09T13:56:50.669-0400 I SHARDING [conn1] distributed lock 'db21/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e2ca4787b9985d1c5e
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.673-0400 m30999| 2015-07-09T13:56:50.673-0400 I SHARDING [conn1] Placing [db21] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.673-0400 m30999| 2015-07-09T13:56:50.673-0400 I SHARDING [conn1] Enabling sharding for database [db21] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.728-0400 m30999| 2015-07-09T13:56:50.727-0400 I SHARDING [conn1] distributed lock 'db21/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.751-0400 m31200| 2015-07-09T13:56:50.751-0400 I INDEX [conn71] build index on: db21.coll21 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db21.coll21" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.751-0400 m31200| 2015-07-09T13:56:50.751-0400 I INDEX [conn71] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.763-0400 m31200| 2015-07-09T13:56:50.762-0400 I INDEX [conn71] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.764-0400 m30999| 2015-07-09T13:56:50.764-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db21.coll21", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.766-0400 m30999| 2015-07-09T13:56:50.766-0400 I SHARDING [conn1] distributed lock 'db21.coll21/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e2ca4787b9985d1c5f
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.767-0400 m30999| 2015-07-09T13:56:50.767-0400 I SHARDING [conn1] enable sharding on: db21.coll21 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.768-0400 m30999| 2015-07-09T13:56:50.767-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:50.767-0400-559eb5e2ca4787b9985d1c60", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464610767), what: "shardCollection.start", ns: "db21.coll21", details: { shardKey: { _id: "hashed" }, collection: "db21.coll21", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.769-0400 m31201| 2015-07-09T13:56:50.769-0400 I INDEX [repl writer worker 5] build index on: db21.coll21 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db21.coll21" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.769-0400 m31201| 2015-07-09T13:56:50.769-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.773-0400 m31202| 2015-07-09T13:56:50.773-0400 I INDEX [repl writer worker 3] build index on: db21.coll21 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db21.coll21" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.773-0400 m31202| 2015-07-09T13:56:50.773-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.777-0400 m31201| 2015-07-09T13:56:50.777-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.780-0400 m31202| 2015-07-09T13:56:50.780-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.821-0400 m30999| 2015-07-09T13:56:50.820-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db21.coll21 using new epoch 559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.928-0400 m30999| 2015-07-09T13:56:50.927-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db21.coll21: 0ms sequenceNumber: 95 version: 1|1||559eb5e2ca4787b9985d1c61 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.983-0400 m30999| 2015-07-09T13:56:50.982-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db21.coll21: 0ms sequenceNumber: 96 version: 1|1||559eb5e2ca4787b9985d1c61 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.985-0400 m31200| 2015-07-09T13:56:50.984-0400 I SHARDING [conn41] remotely refreshing metadata for db21.coll21 with requested shard version 1|1||559eb5e2ca4787b9985d1c61, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.987-0400 m31200| 2015-07-09T13:56:50.986-0400 I SHARDING [conn41] collection db21.coll21 was previously unsharded, new metadata loaded with shard version 1|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.987-0400 m31200| 2015-07-09T13:56:50.986-0400 I SHARDING [conn41] collection version was loaded at version 1|1||559eb5e2ca4787b9985d1c61, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:50.987-0400 m30999| 2015-07-09T13:56:50.987-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:50.987-0400-559eb5e2ca4787b9985d1c62", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464610987), what: "shardCollection", ns: "db21.coll21", details: { version: "1|1||559eb5e2ca4787b9985d1c61" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.041-0400 m30999| 2015-07-09T13:56:51.041-0400 I SHARDING [conn1] distributed lock 'db21.coll21/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.042-0400 m30999| 2015-07-09T13:56:51.042-0400 I SHARDING [conn1] moving chunk ns: db21.coll21 moving ( ns: db21.coll21, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.043-0400 m31200| 2015-07-09T13:56:51.042-0400 I SHARDING [conn84] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.044-0400 m31200| 2015-07-09T13:56:51.043-0400 I SHARDING [conn84] received moveChunk request: { moveChunk: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e2ca4787b9985d1c61') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.048-0400 m31200| 2015-07-09T13:56:51.047-0400 I SHARDING [conn84] distributed lock 'db21.coll21/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e3d5a107a5b9c0db01
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.048-0400 m31200| 2015-07-09T13:56:51.047-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:51.047-0400-559eb5e3d5a107a5b9c0db02", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436464611047), what: "moveChunk.start", ns: "db21.coll21", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.100-0400 m31200| 2015-07-09T13:56:51.100-0400 I SHARDING [conn84] remotely refreshing metadata for db21.coll21 based on current shard version 1|1||559eb5e2ca4787b9985d1c61, current metadata version is 1|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.102-0400 m31200| 2015-07-09T13:56:51.101-0400 I SHARDING [conn84] metadata of collection db21.coll21 already up to date (shard version : 1|1||559eb5e2ca4787b9985d1c61, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.102-0400 m31200| 2015-07-09T13:56:51.101-0400 I SHARDING [conn84] moveChunk request accepted at version 1|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.102-0400 m31200| 2015-07-09T13:56:51.102-0400 I SHARDING [conn84] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.103-0400 m31100| 2015-07-09T13:56:51.102-0400 I SHARDING [conn19] remotely refreshing metadata for db21.coll21, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.104-0400 m31100| 2015-07-09T13:56:51.104-0400 I SHARDING [conn19] collection db21.coll21 was previously unsharded, new metadata loaded with shard version 0|0||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.104-0400 m31100| 2015-07-09T13:56:51.104-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5e2ca4787b9985d1c61, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.105-0400 m31100| 2015-07-09T13:56:51.104-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db21.coll21 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.107-0400 m31200| 2015-07-09T13:56:51.106-0400 I SHARDING [conn84] moveChunk data transfer progress: { active: true, ns: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.110-0400 m31200| 2015-07-09T13:56:51.109-0400 I SHARDING [conn84] moveChunk data transfer progress: { active: true, ns: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.115-0400 m31200| 2015-07-09T13:56:51.114-0400 I SHARDING [conn84] moveChunk data transfer progress: { active: true, ns: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.119-0400 m31100| 2015-07-09T13:56:51.119-0400 I INDEX [migrateThread] build index on: db21.coll21 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db21.coll21" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.119-0400 m31100| 2015-07-09T13:56:51.119-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.124-0400 m31200| 2015-07-09T13:56:51.123-0400 I SHARDING [conn84] moveChunk data transfer progress: { active: true, ns: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.130-0400 m31100| 2015-07-09T13:56:51.130-0400 I INDEX [migrateThread] build index on: db21.coll21 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db21.coll21" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.130-0400 m31100| 2015-07-09T13:56:51.130-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.140-0400 m31100| 2015-07-09T13:56:51.140-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.141-0400 m31200| 2015-07-09T13:56:51.141-0400 I SHARDING [conn84] moveChunk data transfer progress: { active: true, ns: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.141-0400 m31100| 2015-07-09T13:56:51.141-0400 I SHARDING [migrateThread] Deleter starting delete for: db21.coll21 from { _id: MinKey } -> { _id: 0 }, with opId: 26117
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.141-0400 m31100| 2015-07-09T13:56:51.141-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db21.coll21 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.149-0400 m31101| 2015-07-09T13:56:51.148-0400 I INDEX [repl writer worker 8] build index on: db21.coll21 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db21.coll21" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.149-0400 m31102| 2015-07-09T13:56:51.148-0400 I INDEX [repl writer worker 1] build index on: db21.coll21 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db21.coll21" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.149-0400 m31101| 2015-07-09T13:56:51.148-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.149-0400 m31102| 2015-07-09T13:56:51.148-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.157-0400 m31102| 2015-07-09T13:56:51.157-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.157-0400 m31101| 2015-07-09T13:56:51.157-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.159-0400 m31100| 2015-07-09T13:56:51.158-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.159-0400 m31100| 2015-07-09T13:56:51.158-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db21.coll21' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.175-0400 m31200| 2015-07-09T13:56:51.174-0400 I SHARDING [conn84] moveChunk data transfer progress: { active: true, ns: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.175-0400 m31200| 2015-07-09T13:56:51.174-0400 I SHARDING [conn84] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.175-0400 m31200| 2015-07-09T13:56:51.175-0400 I SHARDING [conn84] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.176-0400 m31200| 2015-07-09T13:56:51.175-0400 I SHARDING [conn84] moveChunk setting version to: 2|0||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.182-0400 m31100| 2015-07-09T13:56:51.182-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db21.coll21' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.182-0400 m31100| 2015-07-09T13:56:51.182-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:51.182-0400-559eb5e3792e00bb67274930", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464611182), what: "moveChunk.to", ns: "db21.coll21", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 36, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.235-0400 m31200| 2015-07-09T13:56:51.235-0400 I SHARDING [conn84] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.236-0400 m31200| 2015-07-09T13:56:51.235-0400 I SHARDING [conn84] moveChunk updating self version to: 2|1||559eb5e2ca4787b9985d1c61 through { _id: 0 } -> { _id: MaxKey } for collection 'db21.coll21'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.237-0400 m31200| 2015-07-09T13:56:51.236-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:51.236-0400-559eb5e3d5a107a5b9c0db03", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436464611236), what: "moveChunk.commit", ns: "db21.coll21", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.289-0400 m31200| 2015-07-09T13:56:51.289-0400 I SHARDING [conn84] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.290-0400 m31200| 2015-07-09T13:56:51.289-0400 I SHARDING [conn84] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.290-0400 m31200| 2015-07-09T13:56:51.289-0400 I SHARDING [conn84] Deleter starting delete for: db21.coll21 from { _id: MinKey } -> { _id: 0 }, with opId: 30143
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.290-0400 m31200| 2015-07-09T13:56:51.289-0400 I SHARDING [conn84] rangeDeleter deleted 0 documents for db21.coll21 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.290-0400 m31200| 2015-07-09T13:56:51.289-0400 I SHARDING [conn84] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.290-0400 m31200| 2015-07-09T13:56:51.290-0400 I SHARDING [conn84] distributed lock 'db21.coll21/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.291-0400 m31200| 2015-07-09T13:56:51.290-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:51.290-0400-559eb5e3d5a107a5b9c0db04", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436464611290), what: "moveChunk.from", ns: "db21.coll21", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 3, step 4 of 6: 69, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.344-0400 m31200| 2015-07-09T13:56:51.343-0400 I COMMAND [conn84] command db21.coll21 command: moveChunk { moveChunk: "db21.coll21", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e2ca4787b9985d1c61') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 300ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.346-0400 m30999| 2015-07-09T13:56:51.345-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db21.coll21: 0ms sequenceNumber: 97 version: 2|1||559eb5e2ca4787b9985d1c61 based on: 1|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.347-0400 m31100| 2015-07-09T13:56:51.346-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db21.coll21", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e2ca4787b9985d1c61') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.350-0400 m31100| 2015-07-09T13:56:51.349-0400 I SHARDING [conn38] distributed lock 'db21.coll21/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5e3792e00bb67274931
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.350-0400 m31100| 2015-07-09T13:56:51.349-0400 I SHARDING [conn38] remotely refreshing metadata for db21.coll21 based on current shard version 0|0||559eb5e2ca4787b9985d1c61, current metadata version is 1|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.351-0400 m31100| 2015-07-09T13:56:51.351-0400 I SHARDING [conn38] updating metadata for db21.coll21 from shard version 0|0||559eb5e2ca4787b9985d1c61 to shard version 2|0||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.351-0400 m31100| 2015-07-09T13:56:51.351-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559eb5e2ca4787b9985d1c61, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.351-0400 m31100| 2015-07-09T13:56:51.351-0400 I SHARDING [conn38] splitChunk accepted at version 2|0||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.352-0400 m31100| 2015-07-09T13:56:51.352-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:51.352-0400-559eb5e3792e00bb67274932", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436464611352), what: "split", ns: "db21.coll21", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5e2ca4787b9985d1c61') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5e2ca4787b9985d1c61') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.406-0400 m31100| 2015-07-09T13:56:51.406-0400 I SHARDING [conn38] distributed lock 'db21.coll21/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.408-0400 m30999| 2015-07-09T13:56:51.408-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db21.coll21: 0ms sequenceNumber: 98 version: 2|3||559eb5e2ca4787b9985d1c61 based on: 2|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.409-0400 m31200| 2015-07-09T13:56:51.408-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db21.coll21", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e2ca4787b9985d1c61') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.412-0400 m31200| 2015-07-09T13:56:51.411-0400 I SHARDING [conn84] distributed lock 'db21.coll21/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e3d5a107a5b9c0db05
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.412-0400 m31200| 2015-07-09T13:56:51.411-0400 I SHARDING [conn84] remotely refreshing metadata for db21.coll21 based on current shard version 2|0||559eb5e2ca4787b9985d1c61, current metadata version is 2|0||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.413-0400 m31200| 2015-07-09T13:56:51.413-0400 I SHARDING [conn84] updating metadata for db21.coll21 from shard version 2|0||559eb5e2ca4787b9985d1c61 to shard version 2|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.413-0400 m31200| 2015-07-09T13:56:51.413-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb5e2ca4787b9985d1c61, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.413-0400 m31200| 2015-07-09T13:56:51.413-0400 I SHARDING [conn84] splitChunk accepted at version 2|1||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.415-0400 m31200| 2015-07-09T13:56:51.414-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:51.414-0400-559eb5e3d5a107a5b9c0db06", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436464611414), what: "split", ns: "db21.coll21", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5e2ca4787b9985d1c61') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5e2ca4787b9985d1c61') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.468-0400 m31200| 2015-07-09T13:56:51.468-0400 I SHARDING [conn84] distributed lock 'db21.coll21/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.470-0400 m30999| 2015-07-09T13:56:51.470-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db21.coll21: 0ms sequenceNumber: 99 version: 2|5||559eb5e2ca4787b9985d1c61 based on: 2|3||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.480-0400 m31100| 2015-07-09T13:56:51.479-0400 I INDEX [conn56] build index on: db21.coll21 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db21.coll21", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.480-0400 m31100| 2015-07-09T13:56:51.479-0400 I INDEX [conn56] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.483-0400 m31200| 2015-07-09T13:56:51.483-0400 I INDEX [conn41] build index on: db21.coll21 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db21.coll21", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.483-0400 m31200| 2015-07-09T13:56:51.483-0400 I INDEX [conn41] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.486-0400 m31100| 2015-07-09T13:56:51.485-0400 I INDEX [conn56] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.488-0400 m31200| 2015-07-09T13:56:51.488-0400 I INDEX [conn41] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.489-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.519-0400 m31202| 2015-07-09T13:56:51.518-0400 I INDEX [repl writer worker 9] build index on: db21.coll21 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db21.coll21", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.519-0400 m31202| 2015-07-09T13:56:51.518-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.519-0400 m31102| 2015-07-09T13:56:51.518-0400 I INDEX [repl writer worker 13] build index on: db21.coll21 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db21.coll21", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.519-0400 m31102| 2015-07-09T13:56:51.518-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.522-0400 m31101| 2015-07-09T13:56:51.519-0400 I INDEX [repl writer worker 9] build index on: db21.coll21 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db21.coll21", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.523-0400 m31101| 2015-07-09T13:56:51.519-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.538-0400 m31201| 2015-07-09T13:56:51.536-0400 I INDEX [repl writer worker 10] build index on: db21.coll21 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db21.coll21", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.538-0400 m31201| 2015-07-09T13:56:51.536-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.595-0400 m31101| 2015-07-09T13:56:51.586-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.620-0400 m31202| 2015-07-09T13:56:51.617-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.620-0400 m31201| 2015-07-09T13:56:51.619-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.652-0400 m30998| 2015-07-09T13:56:51.650-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63020 #132 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.664-0400 m30999| 2015-07-09T13:56:51.663-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63021 #132 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.675-0400 m30999| 2015-07-09T13:56:51.674-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63022 #133 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.675-0400 m31102| 2015-07-09T13:56:51.674-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.685-0400 m30999| 2015-07-09T13:56:51.685-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63023 #134 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.707-0400 m30999| 2015-07-09T13:56:51.704-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63024 #135 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.716-0400 m30998| 2015-07-09T13:56:51.716-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63025 #133 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.727-0400 m30998| 2015-07-09T13:56:51.726-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63026 #134 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.728-0400 m30998| 2015-07-09T13:56:51.726-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63027 #135 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.728-0400 m30998| 2015-07-09T13:56:51.728-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63028 #136 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.728-0400 m30999| 2015-07-09T13:56:51.728-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63029 #136 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.732-0400 m30999| 2015-07-09T13:56:51.730-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63030 #137 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.735-0400 m30998| 2015-07-09T13:56:51.734-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63031 #137 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.735-0400 m30999| 2015-07-09T13:56:51.735-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63033 #138 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.736-0400 m30999| 2015-07-09T13:56:51.736-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63035 #139 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.737-0400 m30998| 2015-07-09T13:56:51.737-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63032 #138 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.737-0400 m30998| 2015-07-09T13:56:51.737-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63034 #139 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.738-0400 m30999| 2015-07-09T13:56:51.738-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63036 #140 (10 connections now
open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.744-0400 m30998| 2015-07-09T13:56:51.744-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63037 #140 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.744-0400 m30999| 2015-07-09T13:56:51.744-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63038 #141 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.745-0400 m30998| 2015-07-09T13:56:51.744-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63039 #141 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.750-0400 setting random seed: 4334969529882 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.751-0400 setting random seed: 3191126463934 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.751-0400 setting random seed: 3262504325248 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.752-0400 setting random seed: 7256832295097 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.752-0400 setting random seed: 9554126225411 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.752-0400 setting random seed: 2067233421839 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.753-0400 setting random seed: 9464103709906 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.753-0400 setting random seed: 8614520942792 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.755-0400 setting random seed: 9422696302644 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.755-0400 setting random seed: 8871872080489 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.761-0400 m30998| 2015-07-09T13:56:51.760-0400 I SHARDING [conn132] ChunkManager: time to load chunks for db21.coll21: 0ms sequenceNumber: 25 version: 2|5||559eb5e2ca4787b9985d1c61 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.762-0400 setting random seed: 3992137038148 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.767-0400 setting random seed: 4253552928566 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.767-0400 setting random seed: 4418361089192 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.769-0400 setting random seed: 2910219980403 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.769-0400 setting random seed: 5757382931187 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.771-0400 setting random seed: 9394860630854 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.771-0400 setting random seed: 6199545268900 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.775-0400 setting random seed: 5580440494231 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.775-0400 setting random seed: 5847231410443 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:51.807-0400 setting random seed: 463430229574 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.147-0400 m30999| 2015-07-09T13:56:52.146-0400 I NETWORK [conn136] end connection 127.0.0.1:63029 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.155-0400 m30999| 2015-07-09T13:56:52.155-0400 I NETWORK [conn134] end connection 127.0.0.1:63023 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.167-0400 m30998| 2015-07-09T13:56:52.166-0400 I NETWORK [conn133] end connection 127.0.0.1:63025 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.168-0400 m30998| 2015-07-09T13:56:52.168-0400 I NETWORK 
[conn140] end connection 127.0.0.1:63037 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.188-0400 m30999| 2015-07-09T13:56:52.187-0400 I NETWORK [conn138] end connection 127.0.0.1:63033 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.189-0400 m30998| 2015-07-09T13:56:52.189-0400 I NETWORK [conn132] end connection 127.0.0.1:63020 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.203-0400 m30998| 2015-07-09T13:56:52.199-0400 I NETWORK [conn137] end connection 127.0.0.1:63031 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.203-0400 m30999| 2015-07-09T13:56:52.202-0400 I NETWORK [conn132] end connection 127.0.0.1:63021 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.213-0400 m30998| 2015-07-09T13:56:52.207-0400 I NETWORK [conn134] end connection 127.0.0.1:63026 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.214-0400 m30999| 2015-07-09T13:56:52.213-0400 I NETWORK [conn139] end connection 127.0.0.1:63035 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.239-0400 m30999| 2015-07-09T13:56:52.236-0400 I NETWORK [conn137] end connection 127.0.0.1:63030 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.240-0400 m30998| 2015-07-09T13:56:52.239-0400 I NETWORK [conn139] end connection 127.0.0.1:63034 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.249-0400 m30999| 2015-07-09T13:56:52.249-0400 I NETWORK [conn133] end connection 127.0.0.1:63022 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.263-0400 m30998| 2015-07-09T13:56:52.258-0400 I NETWORK [conn138] end connection 127.0.0.1:63032 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.267-0400 m30998| 2015-07-09T13:56:52.266-0400 I NETWORK [conn141] end connection 127.0.0.1:63039 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.275-0400 m30999| 2015-07-09T13:56:52.274-0400 I NETWORK [conn140] end connection 127.0.0.1:63036 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.299-0400 m30999| 2015-07-09T13:56:52.298-0400 I NETWORK [conn141] end connection 127.0.0.1:63038 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.305-0400 m30998| 2015-07-09T13:56:52.302-0400 I NETWORK [conn135] end connection 127.0.0.1:63027 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.323-0400 m30998| 2015-07-09T13:56:52.320-0400 I NETWORK [conn136] end connection 127.0.0.1:63028 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.345-0400 m30999| 2015-07-09T13:56:52.344-0400 I NETWORK [conn135] end connection 127.0.0.1:63024 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.366-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.366-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.366-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.367-0400 jstests/concurrency/fsm_workloads/indexed_insert_text.js: Workload completed in 877 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.367-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.367-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.367-0400 [js_test:fsm_all_sharded_replication] 
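
The indexed_insert_text workload that just completed exercises the text index whose build is logged above (key { _fts: "text", _ftsx: 1 }, weights { indexed_insert_text: 1 }, auto-generated name "indexed_insert_text_text"). That is the internal representation the server uses for an ordinary single-field text index; a minimal mongo-shell sketch, assuming a shell where db points at db21 through one of the test's mongos processes, would be:

    // Minimal sketch, assuming db is the db21 database on a mongos.
    // A { field: "text" } key is stored internally as { _fts: "text", _ftsx: 1 }
    // with weights { indexed_insert_text: 1 }, matching the INDEX lines above.
    db.coll21.createIndex({ indexed_insert_text: "text" });
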
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.367-0400 m30999| 2015-07-09T13:56:52.366-0400 I COMMAND [conn1] DROP: db21.coll21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.367-0400 m30999| 2015-07-09T13:56:52.367-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:52.366-0400-559eb5e4ca4787b9985d1c63", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464612366), what: "dropCollection.start", ns: "db21.coll21", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.424-0400 m30999| 2015-07-09T13:56:52.423-0400 I SHARDING [conn1] distributed lock 'db21.coll21/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e4ca4787b9985d1c64
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.424-0400 m31100| 2015-07-09T13:56:52.424-0400 I COMMAND [conn38] CMD: drop db21.coll21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.427-0400 m31200| 2015-07-09T13:56:52.427-0400 I COMMAND [conn64] CMD: drop db21.coll21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.428-0400 m31101| 2015-07-09T13:56:52.428-0400 I COMMAND [repl writer worker 12] CMD: drop db21.coll21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.429-0400 m31102| 2015-07-09T13:56:52.428-0400 I COMMAND [repl writer worker 2] CMD: drop db21.coll21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.431-0400 m31201| 2015-07-09T13:56:52.431-0400 I COMMAND [repl writer worker 6] CMD: drop db21.coll21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.431-0400 m31202| 2015-07-09T13:56:52.431-0400 I COMMAND [repl writer worker 15] CMD: drop db21.coll21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.483-0400 m31100| 2015-07-09T13:56:52.483-0400 I SHARDING [conn38] remotely refreshing metadata for db21.coll21 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5e2ca4787b9985d1c61, current metadata version is 2|3||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.485-0400 m31100| 2015-07-09T13:56:52.484-0400 W SHARDING [conn38] no chunks found when reloading db21.coll21, previous version was 0|0||559eb5e2ca4787b9985d1c61, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.485-0400 m31100| 2015-07-09T13:56:52.485-0400 I SHARDING [conn38] dropping metadata for db21.coll21 at shard version 2|3||559eb5e2ca4787b9985d1c61, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.486-0400 m31200| 2015-07-09T13:56:52.486-0400 I SHARDING [conn64] remotely refreshing metadata for db21.coll21 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5e2ca4787b9985d1c61, current metadata version is 2|5||559eb5e2ca4787b9985d1c61
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.488-0400 m31200| 2015-07-09T13:56:52.487-0400 W SHARDING [conn64] no chunks found when reloading db21.coll21, previous version was 0|0||559eb5e2ca4787b9985d1c61, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.488-0400 m31200| 2015-07-09T13:56:52.487-0400 I SHARDING [conn64] dropping metadata for db21.coll21 at shard version 2|5||559eb5e2ca4787b9985d1c61, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.489-0400 m30999| 2015-07-09T13:56:52.488-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:52.488-0400-559eb5e4ca4787b9985d1c65", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464612488), what: "dropCollection", ns: "db21.coll21", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.543-0400 m30999| 2015-07-09T13:56:52.542-0400 I SHARDING [conn1] distributed lock 'db21.coll21/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.599-0400 m30999| 2015-07-09T13:56:52.598-0400 I COMMAND [conn1] DROP DATABASE: db21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.599-0400 m30999| 2015-07-09T13:56:52.598-0400 I SHARDING [conn1] DBConfig::dropDatabase: db21
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.599-0400 m30999| 2015-07-09T13:56:52.598-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:52.598-0400-559eb5e4ca4787b9985d1c66", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464612598), what: "dropDatabase.start", ns: "db21", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.704-0400 m30999| 2015-07-09T13:56:52.704-0400 I SHARDING [conn1] DBConfig::dropDatabase: db21 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.704-0400 m31200| 2015-07-09T13:56:52.704-0400 I COMMAND [conn66] dropDatabase db21 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.705-0400 m31200| 2015-07-09T13:56:52.704-0400 I COMMAND [conn66] dropDatabase db21 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.705-0400 m30999| 2015-07-09T13:56:52.705-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:52.705-0400-559eb5e4ca4787b9985d1c67", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464612705), what: "dropDatabase", ns: "db21", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.705-0400 m31201| 2015-07-09T13:56:52.705-0400 I COMMAND [repl writer worker 14] dropDatabase db21 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.705-0400 m31201| 2015-07-09T13:56:52.705-0400 I COMMAND [repl writer worker 14] dropDatabase db21 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.706-0400 m31202| 2015-07-09T13:56:52.705-0400 I COMMAND [repl writer worker 14] dropDatabase db21 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.706-0400 m31202| 2015-07-09T13:56:52.705-0400 I COMMAND [repl writer worker 14] dropDatabase db21 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.794-0400 m31100| 2015-07-09T13:56:52.793-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.797-0400 m31101| 2015-07-09T13:56:52.797-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.797-0400 m31102| 2015-07-09T13:56:52.797-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.834-0400 m31200| 2015-07-09T13:56:52.834-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.837-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.837-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.837-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.837-0400 jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.837-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.837-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.837-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.838-0400 m31201| 2015-07-09T13:56:52.838-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.838-0400 m31202| 2015-07-09T13:56:52.838-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.843-0400 m30999| 2015-07-09T13:56:52.842-0400 I SHARDING [conn1] distributed lock 'db22/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e4ca4787b9985d1c68
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.846-0400 m30999| 2015-07-09T13:56:52.846-0400 I SHARDING [conn1] Placing [db22] on: test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.846-0400 m30999| 2015-07-09T13:56:52.846-0400 I SHARDING [conn1] Enabling sharding for database [db22] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.900-0400 m30999| 2015-07-09T13:56:52.899-0400 I SHARDING [conn1] distributed lock 'db22/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.921-0400 m31200| 2015-07-09T13:56:52.921-0400 I INDEX [conn71] build index on: db22.coll22 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.922-0400 m31200| 2015-07-09T13:56:52.921-0400 I INDEX [conn71] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.929-0400 m31200| 2015-07-09T13:56:52.929-0400 I INDEX [conn71] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.930-0400 m30999| 2015-07-09T13:56:52.930-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db22.coll22", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.934-0400 m30999| 2015-07-09T13:56:52.933-0400 I SHARDING [conn1] distributed lock 'db22.coll22/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e4ca4787b9985d1c69
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.935-0400 m30999| 2015-07-09T13:56:52.934-0400 I SHARDING [conn1] enable sharding on: db22.coll22 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.935-0400 m30999| 2015-07-09T13:56:52.934-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:52.934-0400-559eb5e4ca4787b9985d1c6a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464612934), what: "shardCollection.start", ns: "db22.coll22", details: { shardKey: { _id: "hashed" }, collection: "db22.coll22", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.938-0400 m31202| 2015-07-09T13:56:52.938-0400 I INDEX [repl writer worker 13] build index on: db22.coll22 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.939-0400 m31202| 2015-07-09T13:56:52.938-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.941-0400 m31201| 2015-07-09T13:56:52.941-0400 I INDEX [repl writer worker 2] build index on: db22.coll22 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.941-0400 m31201| 2015-07-09T13:56:52.941-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.943-0400 m31202| 2015-07-09T13:56:52.942-0400 I INDEX [repl writer worker 13] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.946-0400 m31201| 2015-07-09T13:56:52.946-0400 I INDEX [repl writer worker 2] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:52.988-0400 m30999| 2015-07-09T13:56:52.988-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db22.coll22 using new epoch 559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.094-0400 m30999| 2015-07-09T13:56:53.093-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db22.coll22: 0ms sequenceNumber: 100 version: 1|1||559eb5e4ca4787b9985d1c6b based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.149-0400 m30999| 2015-07-09T13:56:53.148-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db22.coll22: 0ms sequenceNumber: 101 version: 1|1||559eb5e4ca4787b9985d1c6b based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.151-0400 m31200| 2015-07-09T13:56:53.150-0400 I SHARDING [conn40] remotely refreshing metadata for db22.coll22 with requested shard version 1|1||559eb5e4ca4787b9985d1c6b, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.152-0400 m31200| 2015-07-09T13:56:53.152-0400 I SHARDING [conn40] collection db22.coll22 was previously unsharded, new metadata loaded with shard version 1|1||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.152-0400 m31200| 2015-07-09T13:56:53.152-0400 I SHARDING [conn40] collection version was loaded at version 1|1||559eb5e4ca4787b9985d1c6b, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.153-0400 m30999| 2015-07-09T13:56:53.152-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:53.152-0400-559eb5e5ca4787b9985d1c6c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464613152), what: "shardCollection", ns: "db22.coll22", details: { version: "1|1||559eb5e4ca4787b9985d1c6b" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.206-0400 m30999| 2015-07-09T13:56:53.205-0400 I SHARDING [conn1] distributed lock 'db22.coll22/bs-osx108-8:30999:1436464534:16807' unlocked.
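
The sequence above ("Placing [db22] on: test-rs1", "CMD: shardcollection", "going to create 2 chunk(s)") is the standard setup for a hashed shard key, which starts with two initial chunks. A minimal mongo-shell sketch of the same steps, assuming a shell connected to one of the test's mongos processes, would be:

    // Minimal sketch, assuming a shell connected to a mongos (e.g. port 30999).
    var admin = db.getSiblingDB("admin");
    admin.runCommand({ enableSharding: "db22" });         // "Enabling sharding for database [db22]"
    admin.runCommand({ shardCollection: "db22.coll22",    // logged as "CMD: shardcollection: ..."
                       key: { _id: "hashed" } });         // hashed key; the log shows numChunks: 2
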
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.207-0400 m30999| 2015-07-09T13:56:53.206-0400 I SHARDING [conn1] moving chunk ns: db22.coll22 moving ( ns: db22.coll22, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.207-0400 m31200| 2015-07-09T13:56:53.207-0400 I SHARDING [conn64] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.208-0400 m31200| 2015-07-09T13:56:53.208-0400 I SHARDING [conn64] received moveChunk request: { moveChunk: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e4ca4787b9985d1c6b') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.212-0400 m31200| 2015-07-09T13:56:53.211-0400 I SHARDING [conn64] distributed lock 'db22.coll22/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e5d5a107a5b9c0db08
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.212-0400 m31200| 2015-07-09T13:56:53.211-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:53.211-0400-559eb5e5d5a107a5b9c0db09", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464613211), what: "moveChunk.start", ns: "db22.coll22", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.265-0400 m31200| 2015-07-09T13:56:53.265-0400 I SHARDING [conn64] remotely refreshing metadata for db22.coll22 based on current shard version 1|1||559eb5e4ca4787b9985d1c6b, current metadata version is 1|1||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.267-0400 m31200| 2015-07-09T13:56:53.266-0400 I SHARDING [conn64] metadata of collection db22.coll22 already up to date (shard version : 1|1||559eb5e4ca4787b9985d1c6b, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.267-0400 m31200| 2015-07-09T13:56:53.266-0400 I SHARDING [conn64] moveChunk request accepted at version 1|1||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.267-0400 m31200| 2015-07-09T13:56:53.267-0400 I SHARDING [conn64] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.268-0400 m31100| 2015-07-09T13:56:53.267-0400 I SHARDING [conn19] remotely refreshing metadata for db22.coll22, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.269-0400 m31100| 2015-07-09T13:56:53.269-0400 I SHARDING [conn19] collection db22.coll22 was previously unsharded, new metadata loaded with shard version 0|0||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.269-0400 m31100| 2015-07-09T13:56:53.269-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5e4ca4787b9985d1c6b, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.270-0400 m31100| 2015-07-09T13:56:53.269-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for collection db22.coll22 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.272-0400 m31200| 2015-07-09T13:56:53.271-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.275-0400 m31200| 2015-07-09T13:56:53.275-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.280-0400 m31200| 2015-07-09T13:56:53.280-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.284-0400 m31100| 2015-07-09T13:56:53.284-0400 I INDEX [migrateThread] build index on: db22.coll22 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.284-0400 m31100| 2015-07-09T13:56:53.284-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.290-0400 m31200| 2015-07-09T13:56:53.289-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.295-0400 m31100| 2015-07-09T13:56:53.293-0400 I INDEX [migrateThread] build index on: db22.coll22 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.295-0400 m31100| 2015-07-09T13:56:53.293-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.304-0400 m31100| 2015-07-09T13:56:53.304-0400 I INDEX [migrateThread] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.305-0400 m31100| 2015-07-09T13:56:53.305-0400 I SHARDING [migrateThread] Deleter starting delete for: db22.coll22 from { _id: MinKey } -> { _id: 0 }, with opId: 26975
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.307-0400 m31100| 2015-07-09T13:56:53.307-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db22.coll22 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.307-0400 m31200| 2015-07-09T13:56:53.307-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.312-0400 m31102| 2015-07-09T13:56:53.312-0400 I INDEX [repl writer worker 13] build index on: db22.coll22 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.313-0400 m31102| 2015-07-09T13:56:53.312-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.316-0400 m31101| 2015-07-09T13:56:53.316-0400 I INDEX [repl writer worker 10] build index on: db22.coll22 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.317-0400 m31101| 2015-07-09T13:56:53.316-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.321-0400 m31102| 2015-07-09T13:56:53.320-0400 I INDEX [repl writer worker 13] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.322-0400 m31101| 2015-07-09T13:56:53.321-0400 I INDEX [repl writer worker 10] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.322-0400 m31100| 2015-07-09T13:56:53.322-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.322-0400 m31100| 2015-07-09T13:56:53.322-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db22.coll22' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.342-0400 m31200| 2015-07-09T13:56:53.341-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.342-0400 m31200| 2015-07-09T13:56:53.341-0400 I SHARDING [conn64] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.342-0400 m31200| 2015-07-09T13:56:53.342-0400 I SHARDING [conn64] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.342-0400 m31200| 2015-07-09T13:56:53.342-0400 I SHARDING [conn64] moveChunk setting version to: 2|0||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.346-0400 m31100| 2015-07-09T13:56:53.345-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db22.coll22' { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.346-0400 m31100| 2015-07-09T13:56:53.346-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:53.346-0400-559eb5e5792e00bb67274933", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464613346), what: "moveChunk.to", ns: "db22.coll22", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 35, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.399-0400 m31200| 2015-07-09T13:56:53.399-0400 I SHARDING [conn64] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.400-0400 m31200| 2015-07-09T13:56:53.399-0400 I SHARDING [conn64] moveChunk updating self version to: 2|1||559eb5e4ca4787b9985d1c6b through { _id: 0 } -> { _id: MaxKey } for collection 'db22.coll22'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.401-0400 m31200| 2015-07-09T13:56:53.400-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:53.400-0400-559eb5e5d5a107a5b9c0db0a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464613400), what: "moveChunk.commit", ns: "db22.coll22", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.454-0400 m31200| 2015-07-09T13:56:53.453-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.454-0400 m31200| 2015-07-09T13:56:53.453-0400 I SHARDING [conn64] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.454-0400 m31200| 2015-07-09T13:56:53.454-0400 I SHARDING [conn64] Deleter starting delete for: db22.coll22 from { _id: MinKey } -> { _id: 0 }, with opId: 31004
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.454-0400 m31200| 2015-07-09T13:56:53.454-0400 I SHARDING [conn64] rangeDeleter deleted 0 documents for db22.coll22 from { _id: MinKey } -> { _id: 0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.454-0400 m31200| 2015-07-09T13:56:53.454-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.455-0400 m31200| 2015-07-09T13:56:53.455-0400 I SHARDING [conn64] distributed lock 'db22.coll22/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.455-0400 m31200| 2015-07-09T13:56:53.455-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:53.455-0400-559eb5e5d5a107a5b9c0db0b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464613455), what: "moveChunk.from", ns: "db22.coll22", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 112, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.509-0400 m31200| 2015-07-09T13:56:53.508-0400 I COMMAND [conn64] command db22.coll22 command: moveChunk { moveChunk: "db22.coll22", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e4ca4787b9985d1c6b') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 301ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.510-0400 m30999| 2015-07-09T13:56:53.510-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db22.coll22: 0ms sequenceNumber: 102 version: 2|1||559eb5e4ca4787b9985d1c6b based on: 1|1||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.512-0400 m31100| 2015-07-09T13:56:53.511-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db22.coll22", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e4ca4787b9985d1c6b') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.515-0400 m31100| 2015-07-09T13:56:53.515-0400 I SHARDING [conn38] distributed lock 'db22.coll22/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5e5792e00bb67274934
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.515-0400 m31100| 2015-07-09T13:56:53.515-0400 I SHARDING [conn38] remotely refreshing metadata for db22.coll22 based on current shard version 0|0||559eb5e4ca4787b9985d1c6b, current metadata version is 1|1||559eb5e4ca4787b9985d1c6b
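
The migration above (donor test-rs1 on m31200, recipient test-rs0 on m31100) can be requested from the shell; a minimal sketch of the equivalent helper call, using names from the log, is below. mongos forwards it to the donor shard as the "received moveChunk request" logged above.

    // Minimal sketch, assuming a shell connected to a mongos of this cluster.
    sh.moveChunk("db22.coll22",      // sharded namespace
                 { _id: MinKey },    // any shard-key value inside the chunk { MinKey } -> { 0 }
                 "test-rs0");        // destination shard, as in the log
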
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.516-0400 m31100| 2015-07-09T13:56:53.516-0400 I SHARDING [conn38] updating metadata for db22.coll22 from shard version 0|0||559eb5e4ca4787b9985d1c6b to shard version 2|0||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.516-0400 m31100| 2015-07-09T13:56:53.516-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559eb5e4ca4787b9985d1c6b, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.517-0400 m31100| 2015-07-09T13:56:53.516-0400 I SHARDING [conn38] splitChunk accepted at version 2|0||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.519-0400 m31100| 2015-07-09T13:56:53.518-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:53.518-0400-559eb5e5792e00bb67274935", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436464613518), what: "split", ns: "db22.coll22", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5e4ca4787b9985d1c6b') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5e4ca4787b9985d1c6b') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.571-0400 m31100| 2015-07-09T13:56:53.571-0400 I SHARDING [conn38] distributed lock 'db22.coll22/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.573-0400 m30999| 2015-07-09T13:56:53.573-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db22.coll22: 0ms sequenceNumber: 103 version: 2|3||559eb5e4ca4787b9985d1c6b based on: 2|1||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.574-0400 m31200| 2015-07-09T13:56:53.573-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db22.coll22", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e4ca4787b9985d1c6b') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.577-0400 m31200| 2015-07-09T13:56:53.577-0400 I SHARDING [conn64] distributed lock 'db22.coll22/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e5d5a107a5b9c0db0c
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.578-0400 m31200| 2015-07-09T13:56:53.577-0400 I SHARDING [conn64] remotely refreshing metadata for db22.coll22 based on current shard version 2|0||559eb5e4ca4787b9985d1c6b, current metadata version is 2|0||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.579-0400 m31200| 2015-07-09T13:56:53.578-0400 I SHARDING [conn64] updating metadata for db22.coll22 from shard version 2|0||559eb5e4ca4787b9985d1c6b to shard version 2|1||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.579-0400 m31200| 2015-07-09T13:56:53.578-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559eb5e4ca4787b9985d1c6b, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.579-0400 m31200| 2015-07-09T13:56:53.578-0400 I SHARDING [conn64] splitChunk accepted at version 2|1||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.580-0400 m31200| 2015-07-09T13:56:53.580-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:53.579-0400-559eb5e5d5a107a5b9c0db0d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464613579), what: "split", ns: "db22.coll22", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5e4ca4787b9985d1c6b') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5e4ca4787b9985d1c6b') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.634-0400 m31200| 2015-07-09T13:56:53.634-0400 I SHARDING [conn64] distributed lock 'db22.coll22/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.637-0400 m30999| 2015-07-09T13:56:53.636-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db22.coll22: 0ms sequenceNumber: 104 version: 2|5||559eb5e4ca4787b9985d1c6b based on: 2|3||559eb5e4ca4787b9985d1c6b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.646-0400 m31200| 2015-07-09T13:56:53.645-0400 I INDEX [conn40] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.646-0400 m31200| 2015-07-09T13:56:53.645-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.646-0400 m31100| 2015-07-09T13:56:53.645-0400 I INDEX [conn52] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.646-0400 m31100| 2015-07-09T13:56:53.645-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.650-0400 m31100| 2015-07-09T13:56:53.649-0400 I INDEX [conn52] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.651-0400 m31200| 2015-07-09T13:56:53.651-0400 I INDEX [conn40] build index done.  scanned 0 total records. 0 secs
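
Both splits above cut the initial chunks at +/-4611686018427387902, values close to +/-2^62, i.e. the midpoints of the two halves of the signed 64-bit hashed-key range. A minimal sketch of requesting the same split points by hand, assuming a shell connected to a mongos, would be:

    // Minimal sketch: split each half of the hashed-key range at its midpoint.
    // The values match the splitKeys in the splitChunk requests logged above.
    sh.splitAt("db22.coll22", { _id: NumberLong("-4611686018427387902") });
    sh.splitAt("db22.coll22", { _id: NumberLong("4611686018427387902") });
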
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.656-0400 m31102| 2015-07-09T13:56:53.655-0400 I INDEX [repl writer worker 5] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.656-0400 m31102| 2015-07-09T13:56:53.655-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.664-0400 m31200| 2015-07-09T13:56:53.662-0400 I INDEX [conn40] build index on: db22.coll22 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.664-0400 m31200| 2015-07-09T13:56:53.662-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.664-0400 m31101| 2015-07-09T13:56:53.663-0400 I INDEX [repl writer worker 3] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.665-0400 m31101| 2015-07-09T13:56:53.663-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.668-0400 m31100| 2015-07-09T13:56:53.668-0400 I INDEX [conn52] build index on: db22.coll22 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.669-0400 m31100| 2015-07-09T13:56:53.668-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.675-0400 m31201| 2015-07-09T13:56:53.675-0400 I INDEX [repl writer worker 10] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.675-0400 m31201| 2015-07-09T13:56:53.675-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.676-0400 m31102| 2015-07-09T13:56:53.675-0400 I INDEX [repl writer worker 5] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.678-0400 m31202| 2015-07-09T13:56:53.678-0400 I INDEX [repl writer worker 1] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.678-0400 m31202| 2015-07-09T13:56:53.678-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.679-0400 m31200| 2015-07-09T13:56:53.678-0400 I INDEX [conn40] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.681-0400 m31100| 2015-07-09T13:56:53.681-0400 I INDEX [conn52] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.685-0400 m31201| 2015-07-09T13:56:53.684-0400 I INDEX [repl writer worker 10] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.685-0400 m31101| 2015-07-09T13:56:53.684-0400 I INDEX [repl writer worker 3] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.687-0400 m31202| 2015-07-09T13:56:53.686-0400 I INDEX [repl writer worker 1] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.690-0400 m31200| 2015-07-09T13:56:53.689-0400 I INDEX [conn40] build index on: db22.coll22 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.690-0400 m31200| 2015-07-09T13:56:53.690-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.696-0400 m31102| 2015-07-09T13:56:53.696-0400 I INDEX [repl writer worker 6] build index on: db22.coll22 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.697-0400 m31102| 2015-07-09T13:56:53.696-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.698-0400 m31101| 2015-07-09T13:56:53.697-0400 I INDEX [repl writer worker 5] build index on: db22.coll22 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.698-0400 m31101| 2015-07-09T13:56:53.697-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.702-0400 m31100| 2015-07-09T13:56:53.701-0400 I INDEX [conn52] build index on: db22.coll22 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.703-0400 m31100| 2015-07-09T13:56:53.701-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.703-0400 m31201| 2015-07-09T13:56:53.701-0400 I INDEX [repl writer worker 9] build index on: db22.coll22 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.703-0400 m31201| 2015-07-09T13:56:53.701-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.704-0400 m31202| 2015-07-09T13:56:53.702-0400 I INDEX [repl writer worker 5] build index on: db22.coll22 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.704-0400 m31202| 2015-07-09T13:56:53.702-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.706-0400 m31200| 2015-07-09T13:56:53.705-0400 I INDEX [conn40] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.718-0400 m31101| 2015-07-09T13:56:53.718-0400 I INDEX [repl writer worker 5] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.718-0400 m31100| 2015-07-09T13:56:53.718-0400 I INDEX [conn52] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.723-0400 m31202| 2015-07-09T13:56:53.722-0400 I INDEX [repl writer worker 5] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.724-0400 m31201| 2015-07-09T13:56:53.723-0400 I INDEX [repl writer worker 9] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.725-0400 m31102| 2015-07-09T13:56:53.723-0400 I INDEX [repl writer worker 6] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.732-0400 m31100| 2015-07-09T13:56:53.731-0400 I INDEX [conn52] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.732-0400 m31100| 2015-07-09T13:56:53.731-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.735-0400 m31101| 2015-07-09T13:56:53.735-0400 I INDEX [repl writer worker 14] build index on: db22.coll22 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.735-0400 m31101| 2015-07-09T13:56:53.735-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.736-0400 m31200| 2015-07-09T13:56:53.735-0400 I INDEX [conn40] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.736-0400 m31200| 2015-07-09T13:56:53.735-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.744-0400 m31201| 2015-07-09T13:56:53.743-0400 I INDEX [repl writer worker 11] build index on: db22.coll22 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.744-0400 m31201| 2015-07-09T13:56:53.743-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.752-0400 m31100| 2015-07-09T13:56:53.752-0400 I INDEX [conn52] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.752-0400 m31202| 2015-07-09T13:56:53.752-0400 I INDEX [repl writer worker 8] build index on: db22.coll22 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.752-0400 m31202| 2015-07-09T13:56:53.752-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.753-0400 m31102| 2015-07-09T13:56:53.752-0400 I INDEX [repl writer worker 8] build index on: db22.coll22 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.754-0400 m31102| 2015-07-09T13:56:53.752-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.754-0400 m31101| 2015-07-09T13:56:53.752-0400 I INDEX [repl writer worker 14] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.755-0400 m31200| 2015-07-09T13:56:53.755-0400 I INDEX [conn40] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.763-0400 m31202| 2015-07-09T13:56:53.763-0400 I INDEX [repl writer worker 8] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.763-0400 m31201| 2015-07-09T13:56:53.763-0400 I INDEX [repl writer worker 11] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.764-0400 m31102| 2015-07-09T13:56:53.763-0400 I INDEX [repl writer worker 8] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.767-0400 m31101| 2015-07-09T13:56:53.766-0400 I INDEX [repl writer worker 7] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.767-0400 m31101| 2015-07-09T13:56:53.766-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.771-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.804-0400 m31102| 2015-07-09T13:56:53.773-0400 I INDEX [repl writer worker 4] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.810-0400 m31102| 2015-07-09T13:56:53.773-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.811-0400 m31202| 2015-07-09T13:56:53.772-0400 I INDEX [repl writer worker 2] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.811-0400 m31202| 2015-07-09T13:56:53.775-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.811-0400 m31201| 2015-07-09T13:56:53.796-0400 I INDEX [repl writer worker 13] build index on: db22.coll22 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db22.coll22" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.811-0400 m31201| 2015-07-09T13:56:53.796-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.836-0400 m31101| 2015-07-09T13:56:53.821-0400 I INDEX [repl writer worker 7] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.836-0400 m31201| 2015-07-09T13:56:53.821-0400 I INDEX [repl writer worker 13] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.836-0400 m31102| 2015-07-09T13:56:53.835-0400 I INDEX [repl writer worker 4] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.856-0400 m31202| 2015-07-09T13:56:53.851-0400 I INDEX [repl writer worker 2] build index done.  scanned 0 total records. 0 secs
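
The four index builds above (x_1, y_1, z_1, and the compound x_1_y_1_z_1) are the setup phase of the update_multifield_isolated_multiupdate workload. A minimal sketch of the equivalent shell calls, assuming db points at db22 through a mongos:

    // Minimal sketch, assuming db is the db22 database on a mongos.
    // The names logged above (x_1, y_1, z_1, x_1_y_1_z_1) are the
    // auto-generated index names for these key patterns.
    db.coll22.createIndex({ x: 1 });
    db.coll22.createIndex({ y: 1 });
    db.coll22.createIndex({ z: 1 });
    db.coll22.createIndex({ x: 1, y: 1, z: 1 });
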
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.869-0400 m30999| 2015-07-09T13:56:53.868-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63040 #142 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.881-0400 m30999| 2015-07-09T13:56:53.880-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63041 #143 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.882-0400 m30998| 2015-07-09T13:56:53.882-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63042 #142 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.893-0400 m30999| 2015-07-09T13:56:53.893-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63043 #144 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.899-0400 m30998| 2015-07-09T13:56:53.899-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63045 #143 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.902-0400 m30999| 2015-07-09T13:56:53.901-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63044 #145 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.906-0400 m30999| 2015-07-09T13:56:53.906-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63048 #146 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.907-0400 m30998| 2015-07-09T13:56:53.906-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63046 #144 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.908-0400 m30998| 2015-07-09T13:56:53.908-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63047 #145 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.908-0400 m30998| 2015-07-09T13:56:53.908-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63049 #146 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.916-0400 setting random seed: 8068824028596 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.916-0400 setting random seed: 8184685804881 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.916-0400 setting random seed: 7391448132693 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.916-0400 setting random seed: 7832922651432 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.916-0400 setting random seed: 2725511766038 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.917-0400 setting random seed: 7931063924916 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.918-0400 setting random seed: 7676273141987 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.918-0400 setting random seed: 5122757460922 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.920-0400 setting random seed: 1797165279276 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.921-0400 setting random seed: 14386693947 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:53.927-0400 m30998| 2015-07-09T13:56:53.926-0400 I SHARDING [conn144] ChunkManager: time to load chunks for db22.coll22: 0ms sequenceNumber: 26 version: 2|5||559eb5e4ca4787b9985d1c6b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.105-0400 m30998| 2015-07-09T13:56:54.103-0400 I NETWORK [conn142] end connection 127.0.0.1:63042 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.106-0400 m30999| 2015-07-09T13:56:54.103-0400 I NETWORK 
[conn145] end connection 127.0.0.1:63044 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.113-0400 m30999| 2015-07-09T13:56:54.113-0400 I NETWORK [conn144] end connection 127.0.0.1:63043 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.117-0400 m30998| 2015-07-09T13:56:54.116-0400 I NETWORK [conn146] end connection 127.0.0.1:63049 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.148-0400 m30999| 2015-07-09T13:56:54.146-0400 I NETWORK [conn146] end connection 127.0.0.1:63048 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.148-0400 m30999| 2015-07-09T13:56:54.146-0400 I NETWORK [conn142] end connection 127.0.0.1:63040 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.157-0400 m30998| 2015-07-09T13:56:54.157-0400 I NETWORK [conn143] end connection 127.0.0.1:63045 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.164-0400 m30998| 2015-07-09T13:56:54.163-0400 I NETWORK [conn145] end connection 127.0.0.1:63047 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.173-0400 m30999| 2015-07-09T13:56:54.169-0400 I NETWORK [conn143] end connection 127.0.0.1:63041 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.174-0400 m30998| 2015-07-09T13:56:54.173-0400 I NETWORK [conn144] end connection 127.0.0.1:63046 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 jstests/concurrency/fsm_workloads/update_multifield_isolated_multiupdate.js: Workload completed in 425 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.195-0400 m30999| 2015-07-09T13:56:54.195-0400 I COMMAND [conn1] DROP: db22.coll22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.196-0400 m30999| 2015-07-09T13:56:54.195-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:54.195-0400-559eb5e6ca4787b9985d1c6d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464614195), what: "dropCollection.start", ns: "db22.coll22", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.252-0400 m30999| 2015-07-09T13:56:54.252-0400 I SHARDING [conn1] distributed lock 'db22.coll22/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e6ca4787b9985d1c6e [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.253-0400 m31100| 2015-07-09T13:56:54.253-0400 I COMMAND [conn38] CMD: drop db22.coll22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.256-0400 m31200| 2015-07-09T13:56:54.256-0400 I COMMAND [conn64] CMD: drop db22.coll22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.257-0400 m31102| 2015-07-09T13:56:54.256-0400 I COMMAND [repl writer worker 14] CMD: drop db22.coll22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.257-0400 m31101| 2015-07-09T13:56:54.257-0400 I COMMAND [repl writer worker 15] CMD: drop db22.coll22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.260-0400 m31201| 2015-07-09T13:56:54.260-0400 I 
COMMAND [repl writer worker 4] CMD: drop db22.coll22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.260-0400 m31202| 2015-07-09T13:56:54.260-0400 I COMMAND [repl writer worker 14] CMD: drop db22.coll22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.314-0400 m31100| 2015-07-09T13:56:54.313-0400 I SHARDING [conn38] remotely refreshing metadata for db22.coll22 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5e4ca4787b9985d1c6b, current metadata version is 2|3||559eb5e4ca4787b9985d1c6b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.315-0400 m31100| 2015-07-09T13:56:54.315-0400 W SHARDING [conn38] no chunks found when reloading db22.coll22, previous version was 0|0||559eb5e4ca4787b9985d1c6b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.315-0400 m31100| 2015-07-09T13:56:54.315-0400 I SHARDING [conn38] dropping metadata for db22.coll22 at shard version 2|3||559eb5e4ca4787b9985d1c6b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.317-0400 m31200| 2015-07-09T13:56:54.316-0400 I SHARDING [conn64] remotely refreshing metadata for db22.coll22 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5e4ca4787b9985d1c6b, current metadata version is 2|5||559eb5e4ca4787b9985d1c6b [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.318-0400 m31200| 2015-07-09T13:56:54.318-0400 W SHARDING [conn64] no chunks found when reloading db22.coll22, previous version was 0|0||559eb5e4ca4787b9985d1c6b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.318-0400 m31200| 2015-07-09T13:56:54.318-0400 I SHARDING [conn64] dropping metadata for db22.coll22 at shard version 2|5||559eb5e4ca4787b9985d1c6b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.319-0400 m30999| 2015-07-09T13:56:54.318-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:54.318-0400-559eb5e6ca4787b9985d1c6f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464614318), what: "dropCollection", ns: "db22.coll22", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.373-0400 m30999| 2015-07-09T13:56:54.373-0400 I SHARDING [conn1] distributed lock 'db22.coll22/bs-osx108-8:30999:1436464534:16807' unlocked. 
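
[Editor's note: the dropCollection sequence above is driven from the mongos on port 30999: conn1 takes the distributed lock for db22.coll22, logs the dropCollection.start and dropCollection changelog events, sends the drop to each shard primary (conn38 on test-rs0, conn64 on test-rs1, replicated to the secondaries), and each shard then refreshes and discards its chunk metadata. A minimal shell equivalent between workloads might look like the sketch below; the connection host/port is a placeholder for this fixture's router, not taken from the workload source.

    var mongos = new Mongo("localhost:30999");        // hypothetical: one of the two routers in this fixture
    var dropped = mongos.getDB("db22").coll22.drop(); // via mongos: takes the distributed lock, drops the
                                                      // collection on both shards, then drops chunk metadata
    assert(dropped);
]
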
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.430-0400 m30999| 2015-07-09T13:56:54.429-0400 I COMMAND [conn1] DROP DATABASE: db22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.430-0400 m30999| 2015-07-09T13:56:54.430-0400 I SHARDING [conn1] DBConfig::dropDatabase: db22 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.431-0400 m30999| 2015-07-09T13:56:54.430-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:54.430-0400-559eb5e6ca4787b9985d1c70", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464614430), what: "dropDatabase.start", ns: "db22", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.536-0400 m30999| 2015-07-09T13:56:54.535-0400 I SHARDING [conn1] DBConfig::dropDatabase: db22 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.536-0400 m31200| 2015-07-09T13:56:54.536-0400 I COMMAND [conn66] dropDatabase db22 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.536-0400 m31200| 2015-07-09T13:56:54.536-0400 I COMMAND [conn66] dropDatabase db22 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.536-0400 m30999| 2015-07-09T13:56:54.536-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:54.536-0400-559eb5e6ca4787b9985d1c71", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464614536), what: "dropDatabase", ns: "db22", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.537-0400 m31201| 2015-07-09T13:56:54.537-0400 I COMMAND [repl writer worker 10] dropDatabase db22 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.537-0400 m31201| 2015-07-09T13:56:54.537-0400 I COMMAND [repl writer worker 10] dropDatabase db22 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.537-0400 m31202| 2015-07-09T13:56:54.537-0400 I COMMAND [repl writer worker 9] dropDatabase db22 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.537-0400 m31202| 2015-07-09T13:56:54.537-0400 I COMMAND [repl writer worker 9] dropDatabase db22 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.627-0400 m31100| 2015-07-09T13:56:54.626-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.630-0400 m31101| 2015-07-09T13:56:54.630-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.631-0400 m31102| 2015-07-09T13:56:54.630-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.658-0400 m31200| 2015-07-09T13:56:54.657-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.660-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.660-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.660-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.660-0400 jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.660-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.660-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.660-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.661-0400 m31201| 2015-07-09T13:56:54.660-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:54.662-0400 m31202| 2015-07-09T13:56:54.661-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.665-0400 m30999| 2015-07-09T13:56:54.664-0400 I SHARDING [conn1] distributed lock 'db23/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e6ca4787b9985d1c72 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.668-0400 m30999| 2015-07-09T13:56:54.667-0400 I SHARDING [conn1] Placing [db23] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.668-0400 m30999| 2015-07-09T13:56:54.667-0400 I SHARDING [conn1] Enabling sharding for database [db23] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.722-0400 m30999| 2015-07-09T13:56:54.721-0400 I SHARDING [conn1] distributed lock 'db23/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.744-0400 m31200| 2015-07-09T13:56:54.743-0400 I INDEX [conn51] build index on: db23.coll23 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.744-0400 m31200| 2015-07-09T13:56:54.743-0400 I INDEX [conn51] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.750-0400 m31200| 2015-07-09T13:56:54.750-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.751-0400 m30999| 2015-07-09T13:56:54.751-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db23.coll23", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.755-0400 m30999| 2015-07-09T13:56:54.754-0400 I SHARDING [conn1] distributed lock 'db23.coll23/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e6ca4787b9985d1c73 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.756-0400 m30999| 2015-07-09T13:56:54.756-0400 I SHARDING [conn1] enable sharding on: db23.coll23 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.757-0400 m30999| 2015-07-09T13:56:54.756-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:54.756-0400-559eb5e6ca4787b9985d1c74", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464614756), what: "shardCollection.start", ns: "db23.coll23", details: { shardKey: { _id: "hashed" }, collection: "db23.coll23", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.763-0400 m31201| 2015-07-09T13:56:54.762-0400 I INDEX [repl writer worker 13] build index on: db23.coll23 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.764-0400 m31201| 2015-07-09T13:56:54.762-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.768-0400 m31202| 2015-07-09T13:56:54.768-0400 I INDEX [repl writer worker 13] build index on: db23.coll23 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.768-0400 m31202| 2015-07-09T13:56:54.768-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.772-0400 m31201| 2015-07-09T13:56:54.771-0400 I INDEX [repl writer 
worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.774-0400 m31202| 2015-07-09T13:56:54.774-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.809-0400 m30999| 2015-07-09T13:56:54.809-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db23.coll23 using new epoch 559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.917-0400 m30999| 2015-07-09T13:56:54.916-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db23.coll23: 0ms sequenceNumber: 105 version: 1|1||559eb5e6ca4787b9985d1c75 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.972-0400 m30999| 2015-07-09T13:56:54.971-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db23.coll23: 0ms sequenceNumber: 106 version: 1|1||559eb5e6ca4787b9985d1c75 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.974-0400 m31200| 2015-07-09T13:56:54.973-0400 I SHARDING [conn40] remotely refreshing metadata for db23.coll23 with requested shard version 1|1||559eb5e6ca4787b9985d1c75, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.975-0400 m31200| 2015-07-09T13:56:54.975-0400 I SHARDING [conn40] collection db23.coll23 was previously unsharded, new metadata loaded with shard version 1|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.975-0400 m31200| 2015-07-09T13:56:54.975-0400 I SHARDING [conn40] collection version was loaded at version 1|1||559eb5e6ca4787b9985d1c75, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:54.976-0400 m30999| 2015-07-09T13:56:54.975-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:54.975-0400-559eb5e6ca4787b9985d1c76", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464614975), what: "shardCollection", ns: "db23.coll23", details: { version: "1|1||559eb5e6ca4787b9985d1c75" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.030-0400 m30999| 2015-07-09T13:56:55.029-0400 I SHARDING [conn1] distributed lock 'db23.coll23/bs-osx108-8:30999:1436464534:16807' unlocked. 
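
[Editor's note: the db23.coll23 setup above enables sharding on db23 (placed on test-rs1), shards the collection on a hashed _id key, and creates the two initial chunks under the new epoch. A sketch of the equivalent admin commands against the router, matching the shardcollection command document shown in the log; the connection details are placeholders:

    var admin = new Mongo("localhost:30999").getDB("admin"); // hypothetical router address
    admin.runCommand({ enableSharding: "db23" });            // "Enabling sharding for database [db23]"
    admin.runCommand({ shardCollection: "db23.coll23",       // builds the _id_hashed index and
                       key: { _id: "hashed" } });            // creates the 2 initial chunks

Because the key is hashed, mongos then distributes the initial chunks itself: the moveChunk of { _id: MinKey } -> { _id: 0 } from test-rs1 to test-rs0 and the splitChunk requests at -4611686018427387902 and 4611686018427387902 that follow are that redistribution, not workload traffic.]
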
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.030-0400 m30999| 2015-07-09T13:56:55.030-0400 I SHARDING [conn1] moving chunk ns: db23.coll23 moving ( ns: db23.coll23, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.031-0400 m31200| 2015-07-09T13:56:55.030-0400 I SHARDING [conn64] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.032-0400 m31200| 2015-07-09T13:56:55.031-0400 I SHARDING [conn64] received moveChunk request: { moveChunk: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e6ca4787b9985d1c75') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.035-0400 m31200| 2015-07-09T13:56:55.034-0400 I SHARDING [conn64] distributed lock 'db23.coll23/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e7d5a107a5b9c0db0f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.035-0400 m31200| 2015-07-09T13:56:55.034-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:55.034-0400-559eb5e7d5a107a5b9c0db10", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464615034), what: "moveChunk.start", ns: "db23.coll23", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.087-0400 m31200| 2015-07-09T13:56:55.087-0400 I SHARDING [conn64] remotely refreshing metadata for db23.coll23 based on current shard version 1|1||559eb5e6ca4787b9985d1c75, current metadata version is 1|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.089-0400 m31200| 2015-07-09T13:56:55.089-0400 I SHARDING [conn64] metadata of collection db23.coll23 already up to date (shard version : 1|1||559eb5e6ca4787b9985d1c75, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.089-0400 m31200| 2015-07-09T13:56:55.089-0400 I SHARDING [conn64] moveChunk request accepted at version 1|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.090-0400 m31200| 2015-07-09T13:56:55.089-0400 I SHARDING [conn64] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.090-0400 m31100| 2015-07-09T13:56:55.090-0400 I SHARDING [conn19] remotely refreshing metadata for db23.coll23, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.092-0400 m31100| 2015-07-09T13:56:55.091-0400 I SHARDING [conn19] collection db23.coll23 was previously unsharded, new metadata loaded with shard version 0|0||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.092-0400 m31100| 2015-07-09T13:56:55.091-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5e6ca4787b9985d1c75, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.092-0400 m31100| 2015-07-09T13:56:55.092-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for 
collection db23.coll23 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.094-0400 m31200| 2015-07-09T13:56:55.094-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.098-0400 m31200| 2015-07-09T13:56:55.097-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.103-0400 m31200| 2015-07-09T13:56:55.102-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.106-0400 m31100| 2015-07-09T13:56:55.106-0400 I INDEX [migrateThread] build index on: db23.coll23 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.106-0400 m31100| 2015-07-09T13:56:55.106-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.111-0400 m31100| 2015-07-09T13:56:55.111-0400 I INDEX [migrateThread] build index on: db23.coll23 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.112-0400 m31200| 2015-07-09T13:56:55.111-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.112-0400 m31100| 2015-07-09T13:56:55.111-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.128-0400 m31100| 2015-07-09T13:56:55.128-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.128-0400 m31100| 2015-07-09T13:56:55.128-0400 I SHARDING [migrateThread] Deleter starting delete for: db23.coll23 from { _id: MinKey } -> { _id: 0 }, with opId: 27259 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.129-0400 m31200| 2015-07-09T13:56:55.128-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.129-0400 m31100| 2015-07-09T13:56:55.129-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db23.coll23 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.135-0400 m31101| 2015-07-09T13:56:55.135-0400 I INDEX [repl writer worker 4] build index on: db23.coll23 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.136-0400 m31101| 2015-07-09T13:56:55.135-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.136-0400 m31102| 2015-07-09T13:56:55.135-0400 I INDEX [repl writer worker 9] build index on: db23.coll23 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.137-0400 m31102| 2015-07-09T13:56:55.135-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.141-0400 m31101| 2015-07-09T13:56:55.141-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:55.143-0400 m31100| 2015-07-09T13:56:55.142-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.233-0400 m31100| 2015-07-09T13:56:55.143-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db23.coll23' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.233-0400 m31102| 2015-07-09T13:56:55.144-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.233-0400 m31200| 2015-07-09T13:56:55.162-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.234-0400 m31200| 2015-07-09T13:56:55.162-0400 I SHARDING [conn64] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.234-0400 m31200| 2015-07-09T13:56:55.163-0400 I SHARDING [conn64] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.234-0400 m31200| 2015-07-09T13:56:55.163-0400 I SHARDING [conn64] moveChunk setting version to: 2|0||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.234-0400 m31100| 2015-07-09T13:56:55.165-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db23.coll23' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.234-0400 m31100| 2015-07-09T13:56:55.165-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:55.165-0400-559eb5e7792e00bb67274936", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464615165), what: "moveChunk.to", ns: "db23.coll23", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 36, step 2 of 5: 13, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 22, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.235-0400 m31200| 2015-07-09T13:56:55.219-0400 I SHARDING [conn64] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.235-0400 m31200| 2015-07-09T13:56:55.219-0400 I SHARDING [conn64] moveChunk updating self version to: 2|1||559eb5e6ca4787b9985d1c75 through { _id: 0 } -> { _id: MaxKey } for collection 'db23.coll23' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.235-0400 m31200| 2015-07-09T13:56:55.221-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:55.221-0400-559eb5e7d5a107a5b9c0db11", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464615221), what: "moveChunk.commit", ns: "db23.coll23", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.235-0400 m31200| 2015-07-09T13:56:55.274-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.236-0400 m31200| 2015-07-09T13:56:55.274-0400 I SHARDING [conn64] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.236-0400 m31200| 2015-07-09T13:56:55.274-0400 I SHARDING [conn64] Deleter starting delete for: db23.coll23 from { _id: MinKey } -> { _id: 0 }, with opId: 31559 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:56.236-0400 m31200| 2015-07-09T13:56:55.274-0400 I SHARDING [conn64] rangeDeleter deleted 0 documents for db23.coll23 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.236-0400 m31200| 2015-07-09T13:56:55.274-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.236-0400 m31200| 2015-07-09T13:56:55.275-0400 I SHARDING [conn64] distributed lock 'db23.coll23/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.236-0400 m31200| 2015-07-09T13:56:55.275-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:55.275-0400-559eb5e7d5a107a5b9c0db12", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464615275), what: "moveChunk.from", ns: "db23.coll23", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 3, step 4 of 6: 70, step 5 of 6: 111, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.238-0400 m31200| 2015-07-09T13:56:55.329-0400 I COMMAND [conn64] command db23.coll23 command: moveChunk { moveChunk: "db23.coll23", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e6ca4787b9985d1c75') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 298ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.238-0400 m30999| 2015-07-09T13:56:55.331-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db23.coll23: 0ms sequenceNumber: 107 version: 2|1||559eb5e6ca4787b9985d1c75 based on: 1|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.238-0400 m31100| 2015-07-09T13:56:55.332-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db23.coll23", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e6ca4787b9985d1c75') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.239-0400 m31100| 2015-07-09T13:56:55.335-0400 I SHARDING [conn38] distributed lock 'db23.coll23/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5e7792e00bb67274937 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.239-0400 m31100| 2015-07-09T13:56:55.335-0400 I SHARDING [conn38] remotely refreshing metadata for db23.coll23 based on current shard version 0|0||559eb5e6ca4787b9985d1c75, current metadata version is 1|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.239-0400 m31100| 2015-07-09T13:56:55.337-0400 I SHARDING [conn38] updating metadata for db23.coll23 from shard version 0|0||559eb5e6ca4787b9985d1c75 to shard version 2|0||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.239-0400 m31100| 2015-07-09T13:56:55.337-0400 I 
SHARDING [conn38] collection version was loaded at version 2|1||559eb5e6ca4787b9985d1c75, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.239-0400 m31100| 2015-07-09T13:56:55.337-0400 I SHARDING [conn38] splitChunk accepted at version 2|0||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.240-0400 m31100| 2015-07-09T13:56:55.338-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:55.338-0400-559eb5e7792e00bb67274938", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436464615338), what: "split", ns: "db23.coll23", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5e6ca4787b9985d1c75') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5e6ca4787b9985d1c75') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.240-0400 m31100| 2015-07-09T13:56:55.392-0400 I SHARDING [conn38] distributed lock 'db23.coll23/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.240-0400 m30999| 2015-07-09T13:56:55.393-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db23.coll23: 0ms sequenceNumber: 108 version: 2|3||559eb5e6ca4787b9985d1c75 based on: 2|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.240-0400 m31200| 2015-07-09T13:56:55.394-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db23.coll23", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e6ca4787b9985d1c75') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.240-0400 m31200| 2015-07-09T13:56:55.397-0400 I SHARDING [conn64] distributed lock 'db23.coll23/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e7d5a107a5b9c0db13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.241-0400 m31200| 2015-07-09T13:56:55.397-0400 I SHARDING [conn64] remotely refreshing metadata for db23.coll23 based on current shard version 2|0||559eb5e6ca4787b9985d1c75, current metadata version is 2|0||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.241-0400 m31200| 2015-07-09T13:56:55.399-0400 I SHARDING [conn64] updating metadata for db23.coll23 from shard version 2|0||559eb5e6ca4787b9985d1c75 to shard version 2|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.241-0400 m31200| 2015-07-09T13:56:55.399-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559eb5e6ca4787b9985d1c75, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.241-0400 m31200| 2015-07-09T13:56:55.399-0400 I SHARDING [conn64] splitChunk accepted at version 2|1||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.242-0400 m31200| 2015-07-09T13:56:55.400-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:55.400-0400-559eb5e7d5a107a5b9c0db14", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464615400), what: "split", ns: "db23.coll23", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb5e6ca4787b9985d1c75') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5e6ca4787b9985d1c75') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.242-0400 m31200| 2015-07-09T13:56:55.454-0400 I SHARDING [conn64] distributed lock 'db23.coll23/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.242-0400 m30999| 2015-07-09T13:56:55.456-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db23.coll23: 0ms sequenceNumber: 109 version: 2|5||559eb5e6ca4787b9985d1c75 based on: 2|3||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.242-0400 m30999| 2015-07-09T13:56:55.458-0400 I SHARDING [conn1] sharded connection to test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.242-0400 m30999| 2015-07-09T13:56:55.458-0400 I SHARDING [conn1] retrying command: { listIndexes: "coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.242-0400 m31200| 2015-07-09T13:56:55.458-0400 I NETWORK [conn40] end connection 127.0.0.1:62753 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.243-0400 m31200| 2015-07-09T13:56:55.469-0400 I INDEX [conn30] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.243-0400 m31200| 2015-07-09T13:56:55.469-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.243-0400 m31100| 2015-07-09T13:56:55.469-0400 I INDEX [conn52] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.243-0400 m31100| 2015-07-09T13:56:55.469-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.243-0400 m31100| 2015-07-09T13:56:55.476-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.243-0400 m31200| 2015-07-09T13:56:55.479-0400 I INDEX [conn30] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.243-0400 m31101| 2015-07-09T13:56:55.486-0400 I INDEX [repl writer worker 8] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.244-0400 m31101| 2015-07-09T13:56:55.486-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.244-0400 m31102| 2015-07-09T13:56:55.488-0400 I INDEX [repl writer worker 11] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.244-0400 m31102| 2015-07-09T13:56:55.488-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.244-0400 m31100| 2015-07-09T13:56:55.491-0400 I INDEX [conn52] build index on: db23.coll23 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.244-0400 m31100| 2015-07-09T13:56:55.491-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.244-0400 m31202| 2015-07-09T13:56:55.498-0400 I INDEX [repl writer worker 5] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31202| 2015-07-09T13:56:55.498-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31200| 2015-07-09T13:56:55.498-0400 I INDEX [conn30] build index on: db23.coll23 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31200| 2015-07-09T13:56:55.498-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31201| 2015-07-09T13:56:55.500-0400 I INDEX [repl writer worker 1] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31201| 2015-07-09T13:56:55.500-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31101| 2015-07-09T13:56:55.501-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31102| 2015-07-09T13:56:55.502-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.245-0400 m31100| 2015-07-09T13:56:55.504-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.246-0400 m31202| 2015-07-09T13:56:55.507-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.246-0400 m31200| 2015-07-09T13:56:55.513-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.246-0400 m31201| 2015-07-09T13:56:55.514-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.246-0400 m31101| 2015-07-09T13:56:55.514-0400 I INDEX [repl writer worker 2] build index on: db23.coll23 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.246-0400 m31101| 2015-07-09T13:56:55.514-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.246-0400 m31102| 2015-07-09T13:56:55.514-0400 I INDEX [repl writer worker 1] build index on: db23.coll23 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.246-0400 m31102| 2015-07-09T13:56:55.514-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31101| 2015-07-09T13:56:55.525-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31202| 2015-07-09T13:56:55.529-0400 I INDEX [repl writer worker 8] build index on: db23.coll23 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31202| 2015-07-09T13:56:55.529-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31200| 2015-07-09T13:56:55.530-0400 I INDEX [conn30] build index on: db23.coll23 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31200| 2015-07-09T13:56:55.530-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31100| 2015-07-09T13:56:55.531-0400 I INDEX [conn52] build index on: db23.coll23 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31201| 2015-07-09T13:56:55.531-0400 I INDEX [repl writer worker 15] build index on: db23.coll23 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.247-0400 m31100| 2015-07-09T13:56:55.531-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.248-0400 m31201| 2015-07-09T13:56:55.531-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.248-0400 m31102| 2015-07-09T13:56:55.536-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.248-0400 m31201| 2015-07-09T13:56:55.540-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.248-0400 m31200| 2015-07-09T13:56:55.541-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.248-0400 m31202| 2015-07-09T13:56:55.541-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.248-0400 m31100| 2015-07-09T13:56:55.547-0400 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.248-0400 m31201| 2015-07-09T13:56:55.548-0400 I INDEX [repl writer worker 3] build index on: db23.coll23 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.249-0400 m31201| 2015-07-09T13:56:55.548-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.249-0400 m31202| 2015-07-09T13:56:55.548-0400 I INDEX [repl writer worker 3] build index on: db23.coll23 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.249-0400 m31202| 2015-07-09T13:56:55.548-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.249-0400 m31202| 2015-07-09T13:56:55.554-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.249-0400 m31200| 2015-07-09T13:56:55.566-0400 I INDEX [conn30] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.249-0400 m31200| 2015-07-09T13:56:55.566-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.249-0400 m31100| 2015-07-09T13:56:55.566-0400 I INDEX [conn52] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.250-0400 m31100| 2015-07-09T13:56:55.566-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.250-0400 m31201| 2015-07-09T13:56:55.567-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.250-0400 m31101| 2015-07-09T13:56:55.567-0400 I INDEX [repl writer worker 6] build index on: db23.coll23 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.250-0400 m31101| 2015-07-09T13:56:55.567-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.250-0400 m31102| 2015-07-09T13:56:55.575-0400 I INDEX [repl writer worker 0] build index on: db23.coll23 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.250-0400 m31102| 2015-07-09T13:56:55.575-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.250-0400 m31100| 2015-07-09T13:56:55.577-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.251-0400 m31101| 2015-07-09T13:56:55.583-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.251-0400 m31102| 2015-07-09T13:56:55.589-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.251-0400 m31200| 2015-07-09T13:56:55.590-0400 I INDEX [conn30] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.251-0400 m31101| 2015-07-09T13:56:55.595-0400 I INDEX [repl writer worker 1] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.251-0400 m31101| 2015-07-09T13:56:55.595-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.251-0400 m31102| 2015-07-09T13:56:55.597-0400 I INDEX [repl writer worker 3] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.251-0400 m31102| 2015-07-09T13:56:55.597-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31200| 2015-07-09T13:56:55.603-0400 I COMMAND [conn64] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31100| 2015-07-09T13:56:55.602-0400 I COMMAND [conn38] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31202| 2015-07-09T13:56:55.605-0400 I INDEX [repl writer worker 2] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31202| 2015-07-09T13:56:55.605-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31100| 2015-07-09T13:56:55.607-0400 I COMMAND [conn38] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31200| 2015-07-09T13:56:55.607-0400 I COMMAND [conn64] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31201| 2015-07-09T13:56:55.610-0400 I INDEX [repl writer worker 0] build index on: db23.coll23 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db23.coll23" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.252-0400 m31201| 2015-07-09T13:56:55.610-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.253-0400 m31102| 2015-07-09T13:56:55.612-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.253-0400 m31200| 2015-07-09T13:56:55.613-0400 I COMMAND [conn64] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.253-0400 m31100| 2015-07-09T13:56:55.613-0400 I COMMAND [conn38] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.253-0400 m31102| 2015-07-09T13:56:55.616-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.253-0400 m31101| 2015-07-09T13:56:55.617-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.253-0400 m31202| 2015-07-09T13:56:55.618-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.253-0400 m31101| 2015-07-09T13:56:55.618-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31201| 2015-07-09T13:56:55.618-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31100| 2015-07-09T13:56:55.621-0400 I COMMAND [conn38] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31200| 2015-07-09T13:56:55.621-0400 I COMMAND [conn64] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31102| 2015-07-09T13:56:55.621-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31101| 2015-07-09T13:56:55.621-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31102| 2015-07-09T13:56:55.624-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31202| 2015-07-09T13:56:55.624-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.254-0400 m31201| 2015-07-09T13:56:55.625-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31202| 2015-07-09T13:56:55.626-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31202| 2015-07-09T13:56:55.632-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31101| 2015-07-09T13:56:55.633-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31202| 2015-07-09T13:56:55.645-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31101| 2015-07-09T13:56:55.657-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31201| 2015-07-09T13:56:55.671-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31201| 2015-07-09T13:56:55.682-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.255-0400 m31201| 2015-07-09T13:56:55.689-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.256-0400 m30999| 2015-07-09T13:56:55.743-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63051 #147 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.256-0400 m30998| 2015-07-09T13:56:55.750-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63052 #147 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.256-0400 m30999| 2015-07-09T13:56:55.750-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63053 #148 (3 connections now open) 
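
[Editor's note: the repeated "CMD: dropIndexes db23.coll23" lines above appear to be the _noindex variant of the workload stripping the x_1, y_1, z_1, and x_1_y_1_z_1 indexes that its base workload's setup had just built (the same four indexes are visible being created immediately before), so the multi-updates that follow run without secondary indexes. Each drop issued through mongos reaches both shard primaries and is replicated to their secondaries. Roughly, as a sketch rather than the workload source, with a placeholder router address:

    var coll = new Mongo("localhost:30999").getDB("db23").coll23; // hypothetical router address
    ["x_1", "y_1", "z_1", "x_1_y_1_z_1"].forEach(function (name) {
        coll.dropIndex(name); // logged on each shard primary as "CMD: dropIndexes db23.coll23"
    });
]
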
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.256-0400 m30998| 2015-07-09T13:56:55.757-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63054 #148 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.256-0400 m30999| 2015-07-09T13:56:55.761-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63055 #149 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.256-0400 m30998| 2015-07-09T13:56:55.761-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63056 #149 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.256-0400 m30999| 2015-07-09T13:56:55.765-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63057 #150 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 m30998| 2015-07-09T13:56:55.765-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63058 #150 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 m30999| 2015-07-09T13:56:55.766-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63060 #151 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 m30998| 2015-07-09T13:56:55.772-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63059 #151 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 setting random seed: 4411942916922 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 setting random seed: 4956862865947 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 setting random seed: 5025624074041 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 setting random seed: 4611742096021 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 setting random seed: 5663851751014 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 setting random seed: 9994226763956 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.257-0400 setting random seed: 1524639241397 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 m30998| 2015-07-09T13:56:55.790-0400 I SHARDING [conn147] ChunkManager: time to load chunks for db23.coll23: 0ms sequenceNumber: 27 version: 2|5||559eb5e6ca4787b9985d1c75 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 setting random seed: 2969300029799 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 setting random seed: 9549443926662 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 setting random seed: 2356423628516 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 m30999| 2015-07-09T13:56:55.883-0400 I NETWORK [conn149] end connection 127.0.0.1:63055 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 m30998| 2015-07-09T13:56:55.887-0400 I NETWORK [conn150] end connection 127.0.0.1:63058 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 m30999| 2015-07-09T13:56:55.909-0400 I NETWORK [conn150] end connection 127.0.0.1:63057 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 m30998| 2015-07-09T13:56:55.915-0400 I NETWORK [conn147] end connection 127.0.0.1:63052 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.258-0400 m30998| 2015-07-09T13:56:55.939-0400 I NETWORK [conn151] end connection 127.0.0.1:63059 (3 connections 
now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 m30999| 2015-07-09T13:56:55.941-0400 I NETWORK [conn148] end connection 127.0.0.1:63053 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 m30999| 2015-07-09T13:56:55.953-0400 I NETWORK [conn151] end connection 127.0.0.1:63060 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 m30999| 2015-07-09T13:56:55.965-0400 I NETWORK [conn147] end connection 127.0.0.1:63051 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 m30998| 2015-07-09T13:56:55.974-0400 I NETWORK [conn149] end connection 127.0.0.1:63056 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 m30998| 2015-07-09T13:56:55.981-0400 I NETWORK [conn148] end connection 127.0.0.1:63054 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js: Workload completed in 374 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.259-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 m30999| 2015-07-09T13:56:56.001-0400 I COMMAND [conn1] DROP: db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 m30999| 2015-07-09T13:56:56.001-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.001-0400-559eb5e8ca4787b9985d1c77", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464616001), what: "dropCollection.start", ns: "db23.coll23", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 m30999| 2015-07-09T13:56:56.057-0400 I SHARDING [conn1] distributed lock 'db23.coll23/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e8ca4787b9985d1c78 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 m31100| 2015-07-09T13:56:56.057-0400 I COMMAND [conn38] CMD: drop db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 m31102| 2015-07-09T13:56:56.059-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.260-0400 m31200| 2015-07-09T13:56:56.060-0400 I COMMAND [conn64] CMD: drop db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.261-0400 m31101| 2015-07-09T13:56:56.062-0400 I COMMAND [repl writer worker 7] CMD: drop db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.261-0400 m31102| 2015-07-09T13:56:56.062-0400 I COMMAND [repl writer worker 4] CMD: drop db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.261-0400 m31202| 2015-07-09T13:56:56.063-0400 I COMMAND [repl writer worker 12] CMD: drop db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.261-0400 m31201| 2015-07-09T13:56:56.064-0400 I COMMAND [repl writer worker 1] CMD: drop db23.coll23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.261-0400 m31100| 2015-07-09T13:56:56.115-0400 I SHARDING [conn38] remotely refreshing metadata for db23.coll23 with requested shard version 
0|0||000000000000000000000000, current shard version is 2|3||559eb5e6ca4787b9985d1c75, current metadata version is 2|3||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.261-0400 m31100| 2015-07-09T13:56:56.116-0400 W SHARDING [conn38] no chunks found when reloading db23.coll23, previous version was 0|0||559eb5e6ca4787b9985d1c75, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.261-0400 m31100| 2015-07-09T13:56:56.116-0400 I SHARDING [conn38] dropping metadata for db23.coll23 at shard version 2|3||559eb5e6ca4787b9985d1c75, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.262-0400 m31200| 2015-07-09T13:56:56.117-0400 I SHARDING [conn64] remotely refreshing metadata for db23.coll23 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5e6ca4787b9985d1c75, current metadata version is 2|5||559eb5e6ca4787b9985d1c75 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.262-0400 m31200| 2015-07-09T13:56:56.119-0400 W SHARDING [conn64] no chunks found when reloading db23.coll23, previous version was 0|0||559eb5e6ca4787b9985d1c75, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.262-0400 m31200| 2015-07-09T13:56:56.119-0400 I SHARDING [conn64] dropping metadata for db23.coll23 at shard version 2|5||559eb5e6ca4787b9985d1c75, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.262-0400 m30999| 2015-07-09T13:56:56.120-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.120-0400-559eb5e8ca4787b9985d1c79", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464616120), what: "dropCollection", ns: "db23.coll23", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.262-0400 m30999| 2015-07-09T13:56:56.177-0400 I SHARDING [conn1] distributed lock 'db23.coll23/bs-osx108-8:30999:1436464534:16807' unlocked. 
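The drop sequence above follows the sharded dropCollection protocol: mongos logs a dropCollection.start event, takes the distributed lock on db23.coll23, sends the drop to each shard primary (which reloads its chunk metadata, finds no chunks, and drops the local metadata), logs dropCollection, and releases the lock. A hedged sketch of triggering and auditing that sequence from a shell connected to a mongos:

// Drop the sharded collection, then read the audit events mongos wrote
// to config.changelog (the "about to log metadata event" entries above).
db.getSiblingDB("db23").coll23.drop();
db.getSiblingDB("config").changelog
    .find({ns: "db23.coll23", what: /dropCollection/})
    .sort({time: -1});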
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.262-0400 m30999| 2015-07-09T13:56:56.186-0400 I COMMAND [conn1] DROP DATABASE: db23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.263-0400 m30999| 2015-07-09T13:56:56.186-0400 I SHARDING [conn1] DBConfig::dropDatabase: db23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.263-0400 m30999| 2015-07-09T13:56:56.186-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.186-0400-559eb5e8ca4787b9985d1c7a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464616186), what: "dropDatabase.start", ns: "db23", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.296-0400 m30999| 2015-07-09T13:56:56.295-0400 I SHARDING [conn1] DBConfig::dropDatabase: db23 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.296-0400 m31200| 2015-07-09T13:56:56.296-0400 I COMMAND [conn66] dropDatabase db23 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.297-0400 m31200| 2015-07-09T13:56:56.296-0400 I COMMAND [conn66] dropDatabase db23 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.297-0400 m30999| 2015-07-09T13:56:56.296-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.296-0400-559eb5e8ca4787b9985d1c7b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464616296), what: "dropDatabase", ns: "db23", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.297-0400 m31202| 2015-07-09T13:56:56.297-0400 I COMMAND [repl writer worker 10] dropDatabase db23 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.297-0400 m31202| 2015-07-09T13:56:56.297-0400 I COMMAND [repl writer worker 10] dropDatabase db23 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.298-0400 m31201| 2015-07-09T13:56:56.297-0400 I COMMAND [repl writer worker 7] dropDatabase db23 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.298-0400 m31201| 2015-07-09T13:56:56.297-0400 I COMMAND [repl writer worker 7] dropDatabase db23 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.387-0400 m31100| 2015-07-09T13:56:56.387-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.391-0400 m31102| 2015-07-09T13:56:56.390-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.391-0400 m31101| 2015-07-09T13:56:56.391-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.423-0400 m31200| 2015-07-09T13:56:56.422-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.425-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.425-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.425-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.425-0400 jstests/concurrency/fsm_workloads/agg_sort.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.426-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.426-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.426-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.426-0400 m31202| 2015-07-09T13:56:56.426-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.426-0400 
m31201| 2015-07-09T13:56:56.426-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.430-0400 m30999| 2015-07-09T13:56:56.430-0400 I SHARDING [conn1] distributed lock 'db24/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e8ca4787b9985d1c7c [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.433-0400 m30999| 2015-07-09T13:56:56.433-0400 I SHARDING [conn1] Placing [db24] on: test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.434-0400 m30999| 2015-07-09T13:56:56.433-0400 I SHARDING [conn1] Enabling sharding for database [db24] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.487-0400 m30999| 2015-07-09T13:56:56.487-0400 I SHARDING [conn1] distributed lock 'db24/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.508-0400 m31200| 2015-07-09T13:56:56.507-0400 I INDEX [conn59] build index on: db24.coll24 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db24.coll24" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.508-0400 m31200| 2015-07-09T13:56:56.507-0400 I INDEX [conn59] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.520-0400 m31200| 2015-07-09T13:56:56.520-0400 I INDEX [conn59] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.523-0400 m30999| 2015-07-09T13:56:56.522-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db24.coll24", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.526-0400 m30999| 2015-07-09T13:56:56.525-0400 I SHARDING [conn1] distributed lock 'db24.coll24/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5e8ca4787b9985d1c7d [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.527-0400 m30999| 2015-07-09T13:56:56.527-0400 I SHARDING [conn1] enable sharding on: db24.coll24 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.528-0400 m30999| 2015-07-09T13:56:56.527-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.527-0400-559eb5e8ca4787b9985d1c7e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464616527), what: "shardCollection.start", ns: "db24.coll24", details: { shardKey: { _id: "hashed" }, collection: "db24.coll24", primary: "test-rs1:test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.531-0400 m31201| 2015-07-09T13:56:56.531-0400 I INDEX [repl writer worker 14] build index on: db24.coll24 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db24.coll24" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.532-0400 m31201| 2015-07-09T13:56:56.531-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.533-0400 m31202| 2015-07-09T13:56:56.533-0400 I INDEX [repl writer worker 7] build index on: db24.coll24 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db24.coll24" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.534-0400 m31202| 2015-07-09T13:56:56.533-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.539-0400 m31201| 2015-07-09T13:56:56.539-0400 I INDEX [repl writer worker 14] build index done. 
scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.542-0400 m31202| 2015-07-09T13:56:56.542-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.580-0400 m30999| 2015-07-09T13:56:56.579-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db24.coll24 using new epoch 559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.687-0400 m30999| 2015-07-09T13:56:56.686-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db24.coll24: 0ms sequenceNumber: 110 version: 1|1||559eb5e8ca4787b9985d1c7f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.745-0400 m30999| 2015-07-09T13:56:56.745-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db24.coll24: 0ms sequenceNumber: 111 version: 1|1||559eb5e8ca4787b9985d1c7f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.747-0400 m31200| 2015-07-09T13:56:56.746-0400 I SHARDING [conn83] remotely refreshing metadata for db24.coll24 with requested shard version 1|1||559eb5e8ca4787b9985d1c7f, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.748-0400 m31200| 2015-07-09T13:56:56.748-0400 I SHARDING [conn83] collection db24.coll24 was previously unsharded, new metadata loaded with shard version 1|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.748-0400 m31200| 2015-07-09T13:56:56.748-0400 I SHARDING [conn83] collection version was loaded at version 1|1||559eb5e8ca4787b9985d1c7f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.749-0400 m30999| 2015-07-09T13:56:56.748-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.748-0400-559eb5e8ca4787b9985d1c80", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464616748), what: "shardCollection", ns: "db24.coll24", details: { version: "1|1||559eb5e8ca4787b9985d1c7f" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.804-0400 m30999| 2015-07-09T13:56:56.804-0400 I SHARDING [conn1] distributed lock 'db24.coll24/bs-osx108-8:30999:1436464534:16807' unlocked. 
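The db24 setup above shards coll24 on a hashed _id: mongos enables sharding for the database, the primary shard (test-rs1) builds the _id_hashed index, and two chunks are created under a fresh epoch. A minimal sketch of that setup, assuming an explicit numInitialChunks to match the two chunks the log creates:

// Enable sharding and shard coll24 on a hashed key. With a hashed shard
// key mongos pre-splits the range and then distributes the chunks across
// shards, producing the moveChunk/splitChunk entries that follow.
var admin = db.getSiblingDB("admin");
assert.commandWorked(admin.runCommand({enableSharding: "db24"}));
assert.commandWorked(admin.runCommand({
    shardCollection: "db24.coll24",
    key: {_id: "hashed"},
    numInitialChunks: 2
}));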
[js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.805-0400 m30999| 2015-07-09T13:56:56.805-0400 I SHARDING [conn1] moving chunk ns: db24.coll24 moving ( ns: db24.coll24, shard: test-rs1, lastmod: 1|0||000000000000000000000000, min: { _id: MinKey }, max: { _id: 0 }) test-rs1 -> test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.806-0400 m31200| 2015-07-09T13:56:56.805-0400 I SHARDING [conn64] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.807-0400 m31200| 2015-07-09T13:56:56.806-0400 I SHARDING [conn64] received moveChunk request: { moveChunk: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e8ca4787b9985d1c7f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.810-0400 m31200| 2015-07-09T13:56:56.809-0400 I SHARDING [conn64] distributed lock 'db24.coll24/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e8d5a107a5b9c0db16 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.810-0400 m31200| 2015-07-09T13:56:56.809-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.809-0400-559eb5e8d5a107a5b9c0db17", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464616809), what: "moveChunk.start", ns: "db24.coll24", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.863-0400 m31200| 2015-07-09T13:56:56.863-0400 I SHARDING [conn64] remotely refreshing metadata for db24.coll24 based on current shard version 1|1||559eb5e8ca4787b9985d1c7f, current metadata version is 1|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.865-0400 m31200| 2015-07-09T13:56:56.864-0400 I SHARDING [conn64] metadata of collection db24.coll24 already up to date (shard version : 1|1||559eb5e8ca4787b9985d1c7f, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.865-0400 m31200| 2015-07-09T13:56:56.865-0400 I SHARDING [conn64] moveChunk request accepted at version 1|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.866-0400 m31200| 2015-07-09T13:56:56.866-0400 I SHARDING [conn64] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.867-0400 m31100| 2015-07-09T13:56:56.866-0400 I SHARDING [conn19] remotely refreshing metadata for db24.coll24, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.868-0400 m31100| 2015-07-09T13:56:56.867-0400 I SHARDING [conn19] collection db24.coll24 was previously unsharded, new metadata loaded with shard version 0|0||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.868-0400 m31100| 2015-07-09T13:56:56.868-0400 I SHARDING [conn19] collection version was loaded at version 1|1||559eb5e8ca4787b9985d1c7f, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.868-0400 m31100| 2015-07-09T13:56:56.868-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: MinKey } -> { _id: 0 } for 
collection db24.coll24 from test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202 at epoch 559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.870-0400 m31200| 2015-07-09T13:56:56.870-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.873-0400 m31200| 2015-07-09T13:56:56.873-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.879-0400 m31200| 2015-07-09T13:56:56.878-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.881-0400 m31100| 2015-07-09T13:56:56.880-0400 I INDEX [migrateThread] build index on: db24.coll24 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db24.coll24" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.881-0400 m31100| 2015-07-09T13:56:56.880-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.887-0400 m31200| 2015-07-09T13:56:56.887-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.894-0400 m31100| 2015-07-09T13:56:56.893-0400 I INDEX [migrateThread] build index on: db24.coll24 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db24.coll24" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.894-0400 m31100| 2015-07-09T13:56:56.893-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.903-0400 m31100| 2015-07-09T13:56:56.902-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.903-0400 m31100| 2015-07-09T13:56:56.903-0400 I SHARDING [migrateThread] Deleter starting delete for: db24.coll24 from { _id: MinKey } -> { _id: 0 }, with opId: 27552 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.904-0400 m31100| 2015-07-09T13:56:56.903-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db24.coll24 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.904-0400 m31200| 2015-07-09T13:56:56.904-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.912-0400 m31102| 2015-07-09T13:56:56.912-0400 I INDEX [repl writer worker 2] build index on: db24.coll24 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db24.coll24" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.912-0400 m31102| 2015-07-09T13:56:56.912-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.913-0400 m31101| 2015-07-09T13:56:56.912-0400 I INDEX [repl writer worker 12] build index on: db24.coll24 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db24.coll24" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.913-0400 m31101| 2015-07-09T13:56:56.912-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.918-0400 m31102| 2015-07-09T13:56:56.917-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.920-0400 m31100| 2015-07-09T13:56:56.920-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.920-0400 m31101| 2015-07-09T13:56:56.920-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.920-0400 m31100| 2015-07-09T13:56:56.920-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db24.coll24' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.938-0400 m31200| 2015-07-09T13:56:56.938-0400 I SHARDING [conn64] moveChunk data transfer progress: { active: true, ns: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.938-0400 m31200| 2015-07-09T13:56:56.938-0400 I SHARDING [conn64] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.939-0400 m31200| 2015-07-09T13:56:56.939-0400 I SHARDING [conn64] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.939-0400 m31200| 2015-07-09T13:56:56.939-0400 I SHARDING [conn64] moveChunk setting version to: 2|0||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.943-0400 m31100| 2015-07-09T13:56:56.942-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db24.coll24' { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.943-0400 m31100| 2015-07-09T13:56:56.943-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.943-0400-559eb5e8792e00bb67274939", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464616943), what: "moveChunk.to", ns: "db24.coll24", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 5: 35, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.998-0400 m31200| 2015-07-09T13:56:56.997-0400 I SHARDING [conn64] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", min: { _id: MinKey }, max: { _id: 0 }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.998-0400 m31200| 2015-07-09T13:56:56.997-0400 I SHARDING [conn64] moveChunk updating self version to: 2|1||559eb5e8ca4787b9985d1c7f through { _id: 0 } -> { _id: MaxKey } for collection 'db24.coll24' [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:56.999-0400 m31200| 2015-07-09T13:56:56.999-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:56.999-0400-559eb5e8d5a107a5b9c0db18", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464616999), what: "moveChunk.commit", ns: "db24.coll24", details: { min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs1", to: "test-rs0", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.052-0400 m31200| 2015-07-09T13:56:57.051-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.052-0400 m31200| 2015-07-09T13:56:57.051-0400 I SHARDING [conn64] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 
2015-07-09T13:56:57.052-0400 m31200| 2015-07-09T13:56:57.051-0400 I SHARDING [conn64] Deleter starting delete for: db24.coll24 from { _id: MinKey } -> { _id: 0 }, with opId: 32022 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.053-0400 m31200| 2015-07-09T13:56:57.052-0400 I SHARDING [conn64] rangeDeleter deleted 0 documents for db24.coll24 from { _id: MinKey } -> { _id: 0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.053-0400 m31200| 2015-07-09T13:56:57.052-0400 I SHARDING [conn64] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.053-0400 m31200| 2015-07-09T13:56:57.052-0400 I SHARDING [conn64] distributed lock 'db24.coll24/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.054-0400 m31200| 2015-07-09T13:56:57.052-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:57.052-0400-559eb5e9d5a107a5b9c0db19", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464617052), what: "moveChunk.from", ns: "db24.coll24", details: { min: { _id: MinKey }, max: { _id: 0 }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 69, step 5 of 6: 113, step 6 of 6: 0, to: "test-rs0", from: "test-rs1", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.106-0400 m31200| 2015-07-09T13:56:57.105-0400 I COMMAND [conn64] command db24.coll24 command: moveChunk { moveChunk: "db24.coll24", from: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", to: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", fromShard: "test-rs1", toShard: "test-rs0", min: { _id: MinKey }, max: { _id: 0 }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb5e8ca4787b9985d1c7f') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 299ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.108-0400 m30999| 2015-07-09T13:56:57.107-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db24.coll24: 0ms sequenceNumber: 112 version: 2|1||559eb5e8ca4787b9985d1c7f based on: 1|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.109-0400 m31100| 2015-07-09T13:56:57.109-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db24.coll24", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e8ca4787b9985d1c7f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.113-0400 m31100| 2015-07-09T13:56:57.113-0400 I SHARDING [conn38] distributed lock 'db24.coll24/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb5e9792e00bb6727493a [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.113-0400 m31100| 2015-07-09T13:56:57.113-0400 I SHARDING [conn38] remotely refreshing metadata for db24.coll24 based on current shard version 0|0||559eb5e8ca4787b9985d1c7f, current metadata version is 1|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.114-0400 m31100| 2015-07-09T13:56:57.114-0400 I SHARDING [conn38] updating metadata 
for db24.coll24 from shard version 0|0||559eb5e8ca4787b9985d1c7f to shard version 2|0||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.115-0400 m31100| 2015-07-09T13:56:57.114-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559eb5e8ca4787b9985d1c7f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.115-0400 m31100| 2015-07-09T13:56:57.114-0400 I SHARDING [conn38] splitChunk accepted at version 2|0||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.117-0400 m31100| 2015-07-09T13:56:57.116-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:57.116-0400-559eb5e9792e00bb6727493b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436464617116), what: "split", ns: "db24.coll24", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb5e8ca4787b9985d1c7f') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb5e8ca4787b9985d1c7f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.170-0400 m31100| 2015-07-09T13:56:57.169-0400 I SHARDING [conn38] distributed lock 'db24.coll24/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.172-0400 m30999| 2015-07-09T13:56:57.171-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db24.coll24: 0ms sequenceNumber: 113 version: 2|3||559eb5e8ca4787b9985d1c7f based on: 2|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.172-0400 m31200| 2015-07-09T13:56:57.172-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db24.coll24", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb5e8ca4787b9985d1c7f') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.175-0400 m31200| 2015-07-09T13:56:57.175-0400 I SHARDING [conn64] distributed lock 'db24.coll24/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb5e9d5a107a5b9c0db1a [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.176-0400 m31200| 2015-07-09T13:56:57.175-0400 I SHARDING [conn64] remotely refreshing metadata for db24.coll24 based on current shard version 2|0||559eb5e8ca4787b9985d1c7f, current metadata version is 2|0||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.177-0400 m31200| 2015-07-09T13:56:57.176-0400 I SHARDING [conn64] updating metadata for db24.coll24 from shard version 2|0||559eb5e8ca4787b9985d1c7f to shard version 2|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.177-0400 m31200| 2015-07-09T13:56:57.176-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559eb5e8ca4787b9985d1c7f, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.177-0400 m31200| 2015-07-09T13:56:57.176-0400 I SHARDING [conn64] splitChunk accepted at version 2|1||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.178-0400 m31200| 2015-07-09T13:56:57.178-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:56:57.177-0400-559eb5e9d5a107a5b9c0db1b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new 
Date(1436464617177), what: "split", ns: "db24.coll24", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb5e8ca4787b9985d1c7f') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb5e8ca4787b9985d1c7f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.232-0400 m31200| 2015-07-09T13:56:57.232-0400 I SHARDING [conn64] distributed lock 'db24.coll24/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.234-0400 m30999| 2015-07-09T13:56:57.234-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db24.coll24: 0ms sequenceNumber: 114 version: 2|5||559eb5e8ca4787b9985d1c7f based on: 2|3||559eb5e8ca4787b9985d1c7f [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.814-0400 m31100| 2015-07-09T13:56:57.813-0400 I COMMAND [conn68] command db24.$cmd command: insert { insert: "coll24", documents: 500, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eb5e8ca4787b9985d1c7f') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 518, w: 518 } }, Database: { acquireCount: { w: 518 } }, Collection: { acquireCount: { w: 18 } }, Metadata: { acquireCount: { w: 500 } }, oplog: { acquireCount: { w: 500 } } } protocol:op_command 381ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:57.889-0400 m31200| 2015-07-09T13:56:57.888-0400 I COMMAND [conn59] command db24.$cmd command: insert { insert: "coll24", documents: 500, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eb5e8ca4787b9985d1c7f') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 519, w: 519 } }, Database: { acquireCount: { w: 519 } }, Collection: { acquireCount: { w: 19 } }, Metadata: { acquireCount: { w: 500 } }, oplog: { acquireCount: { w: 500 } } } protocol:op_command 436ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.065-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.115-0400 m30998| 2015-07-09T13:56:58.115-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63063 #152 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.115-0400 m30999| 2015-07-09T13:56:58.115-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63061 #152 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.120-0400 m30999| 2015-07-09T13:56:58.115-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63062 #153 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.131-0400 m30998| 2015-07-09T13:56:58.131-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63064 #153 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.139-0400 m30999| 2015-07-09T13:56:58.138-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63065 #154 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.146-0400 setting random seed: 136424275115 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.146-0400 setting random seed: 2882792926393 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.146-0400 setting random 
seed: 1100736455991 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.147-0400 setting random seed: 77624469995 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.147-0400 setting random seed: 4410670339129 [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.149-0400 m30998| 2015-07-09T13:56:58.149-0400 I SHARDING [conn152] ChunkManager: time to load chunks for db24.coll24: 0ms sequenceNumber: 28 version: 2|5||559eb5e8ca4787b9985d1c7f based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.166-0400 m31100| 2015-07-09T13:56:58.165-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63066 #81 (75 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.168-0400 m31200| 2015-07-09T13:56:58.167-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63067 #86 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.195-0400 m31100| 2015-07-09T13:56:58.194-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63068 #82 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.201-0400 m31200| 2015-07-09T13:56:58.201-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63069 #87 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.248-0400 m31100| 2015-07-09T13:56:58.248-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63070 #83 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.249-0400 m31100| 2015-07-09T13:56:58.249-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63071 #84 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.250-0400 m31100| 2015-07-09T13:56:58.249-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63072 #85 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.261-0400 m31200| 2015-07-09T13:56:58.260-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63073 #88 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.261-0400 m31200| 2015-07-09T13:56:58.261-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63074 #89 (82 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:58.274-0400 m31200| 2015-07-09T13:56:58.271-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63075 #90 (83 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.048-0400 m31200| 2015-07-09T13:56:59.047-0400 I WRITE [conn30] insert db24.tmp.agg_out.2 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 18042, W: 49754 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 721ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.069-0400 m31200| 2015-07-09T13:56:59.068-0400 I WRITE [conn83] insert db24.tmp.agg_out.1 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 721ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.070-0400 m31200| 
2015-07-09T13:56:59.069-0400 I WRITE [conn80] insert db24.tmp.agg_out.3 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 13356, R: 16802, W: 29971 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 713ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.078-0400 m31200| 2015-07-09T13:56:59.078-0400 I WRITE [conn41] insert db24.tmp.agg_out.4 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 735ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.094-0400 m31200| 2015-07-09T13:56:59.094-0400 I WRITE [conn52] insert db24.tmp.agg_out.5 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 511, w: 503 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 1 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 35546, R: 16861, W: 8556 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 729ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.130-0400 m31200| 2015-07-09T13:56:59.126-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.131-0400 m31100| 2015-07-09T13:56:59.131-0400 I NETWORK [conn85] end connection 127.0.0.1:63072 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.139-0400 m31200| 2015-07-09T13:56:59.136-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.139-0400 m31200| 2015-07-09T13:56:59.139-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.139-0400 m31100| 2015-07-09T13:56:59.139-0400 I NETWORK [conn81] end connection 127.0.0.1:63066 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.140-0400 m31200| 2015-07-09T13:56:59.139-0400 I NETWORK [conn89] end connection 127.0.0.1:63074 (82 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.140-0400 m31200| 2015-07-09T13:56:59.140-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.140-0400 m31100| 2015-07-09T13:56:59.140-0400 I NETWORK [conn84] end connection 127.0.0.1:63071 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.143-0400 m31200| 2015-07-09T13:56:59.143-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.145-0400 m31200| 2015-07-09T13:56:59.145-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.148-0400 m31200| 2015-07-09T13:56:59.147-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { 
$mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156985752980 }, { host: "bs-osx108-8:31200", id: 1805905009549 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 469, W: 39146 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 996ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.148-0400 m31200| 2015-07-09T13:56:59.148-0400 I NETWORK [conn86] end connection 127.0.0.1:63067 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.149-0400 m31200| 2015-07-09T13:56:59.148-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.150-0400 m31100| 2015-07-09T13:56:59.143-0400 I NETWORK [conn82] end connection 127.0.0.1:63068 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.151-0400 m31200| 2015-07-09T13:56:59.149-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157078941175 }, { host: "bs-osx108-8:31200", id: 1804535900852 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 1127, W: 25258 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 13356, R: 16802, W: 29971 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 988ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.151-0400 m31200| 2015-07-09T13:56:59.149-0400 I NETWORK [conn90] end connection 127.0.0.1:63075 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.151-0400 m31200| 2015-07-09T13:56:59.149-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156184470157 }, { host: "bs-osx108-8:31200", id: 1804923556208 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 8540, W: 3236 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 35546, R: 16861, W: 8556 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 988ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.156-0400 m31200| 2015-07-09T13:56:59.156-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.158-0400 m31200| 
2015-07-09T13:56:59.158-0400 I NETWORK [conn87] end connection 127.0.0.1:63069 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.160-0400 m31200| 2015-07-09T13:56:59.159-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157741928158 }, { host: "bs-osx108-8:31200", id: 1803945304394 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 23914, W: 4933 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 997ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.161-0400 m31200| 2015-07-09T13:56:59.161-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.162-0400 m31100| 2015-07-09T13:56:59.161-0400 I NETWORK [conn83] end connection 127.0.0.1:63070 (74 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.164-0400 m31200| 2015-07-09T13:56:59.164-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.165-0400 m31200| 2015-07-09T13:56:59.164-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2158175798993 }, { host: "bs-osx108-8:31200", id: 1804150381885 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 515, w: 504, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 58774 } }, Database: { acquireCount: { r: 4, w: 503, R: 1, W: 1 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 18042, W: 49754 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 502 } }, oplog: { acquireCount: { w: 502 } } } protocol:op_command 1003ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.166-0400 m31200| 2015-07-09T13:56:59.164-0400 I NETWORK [conn88] end connection 127.0.0.1:63073 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.195-0400 m31100| 2015-07-09T13:56:59.194-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63076 #86 (75 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.426-0400 m31200| 2015-07-09T13:56:59.425-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63077 #91 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.501-0400 m31100| 2015-07-09T13:56:59.500-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63078 #87 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.508-0400 m31200| 2015-07-09T13:56:59.508-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63079 #92 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.563-0400 m31100| 2015-07-09T13:56:59.563-0400 I NETWORK 
[initandlisten] connection accepted from 127.0.0.1:63080 #88 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.567-0400 m31100| 2015-07-09T13:56:59.567-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63081 #89 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.575-0400 m31200| 2015-07-09T13:56:59.575-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63082 #93 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.578-0400 m31200| 2015-07-09T13:56:59.576-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63083 #94 (82 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.641-0400 m31100| 2015-07-09T13:56:59.640-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63084 #90 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.646-0400 m31100| 2015-07-09T13:56:59.646-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63085 #91 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.648-0400 m31200| 2015-07-09T13:56:59.648-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63086 #95 (83 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:56:59.652-0400 m31200| 2015-07-09T13:56:59.652-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63087 #96 (84 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.385-0400 m31200| 2015-07-09T13:57:00.385-0400 I WRITE [conn83] insert db24.tmp.agg_out.8 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 3840 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 642ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.409-0400 m31200| 2015-07-09T13:57:00.408-0400 I WRITE [conn30] insert db24.tmp.agg_out.7 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 3, W: 2 }, timeAcquiringMicros: { r: 51720, W: 23118 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 650ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.413-0400 m31200| 2015-07-09T13:57:00.412-0400 I WRITE [conn80] insert db24.tmp.agg_out.9 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 31205, W: 7987 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 664ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.424-0400 m31200| 2015-07-09T13:57:00.424-0400 I WRITE [conn41] insert db24.tmp.agg_out.6 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 8365 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 663ms 
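The tmp.agg_out inserts and aggregate commands around this point are the agg_sort workload: each of the five threads sorts coll24 by its random field and writes the result to a per-thread output collection. Through mongos this becomes one cursor per shard, merged by the internal $mergeCursors stage on the primary shard; $out then fills a db24.tmp.agg_out.N collection (the "ninserted:500" writes) before renaming it into place. The per-thread command, reconstructed from the log (thread suffix 0 shown):

// Sort by the workload's random field and replace the output collection.
db.getSiblingDB("db24").coll24.aggregate([
    {$sort: {rand: 1}},
    {$out: "coll24_out_agg_sort_0"}
]);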
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.427-0400 m31200| 2015-07-09T13:57:00.427-0400 I WRITE [conn52] insert db24.tmp.agg_out.10 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 31797, W: 12302 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 671ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.473-0400 m31200| 2015-07-09T13:57:00.471-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.475-0400 m31100| 2015-07-09T13:57:00.471-0400 I NETWORK [conn91] end connection 127.0.0.1:63085 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.483-0400 m31200| 2015-07-09T13:57:00.483-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.486-0400 m31100| 2015-07-09T13:57:00.485-0400 I NETWORK [conn90] end connection 127.0.0.1:63084 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.493-0400 m31200| 2015-07-09T13:57:00.492-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.493-0400 m31200| 2015-07-09T13:57:00.493-0400 I NETWORK [conn96] end connection 127.0.0.1:63087 (83 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.494-0400 m31200| 2015-07-09T13:57:00.493-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2158042840276 }, { host: "bs-osx108-8:31200", id: 1805492498027 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 31944, W: 2310 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 31205, W: 7987 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 932ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.494-0400 m31200| 2015-07-09T13:57:00.493-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.495-0400 m31100| 2015-07-09T13:57:00.494-0400 I NETWORK [conn89] end connection 127.0.0.1:63081 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.501-0400 m31200| 2015-07-09T13:57:00.501-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.502-0400 m31200| 2015-07-09T13:57:00.501-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.502-0400 m31100| 2015-07-09T13:57:00.501-0400 I NETWORK [conn88] end connection 127.0.0.1:63080 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.504-0400 m31200| 2015-07-09T13:57:00.501-0400 I NETWORK [conn93] end connection 127.0.0.1:63082 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.505-0400 m31200| 2015-07-09T13:57:00.501-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2158134289834 }, { host: "bs-osx108-8:31200", id: 1805861872613 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 6594, W: 36184 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 3, W: 2 }, timeAcquiringMicros: { r: 51720, W: 23118 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1040ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.505-0400 m31200| 2015-07-09T13:57:00.501-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.505-0400 m31200| 2015-07-09T13:57:00.503-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.505-0400 m31100| 2015-07-09T13:57:00.504-0400 I NETWORK [conn87] end connection 127.0.0.1:63078 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.506-0400 m31200| 2015-07-09T13:57:00.504-0400 I NETWORK [conn95] end connection 127.0.0.1:63086 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.507-0400 m31200| 2015-07-09T13:57:00.506-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157943929380 }, { host: "bs-osx108-8:31200", id: 1804031557874 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 25518, W: 5791 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 31797, W: 12302 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 944ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.511-0400 m31200| 2015-07-09T13:57:00.510-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.511-0400 m31200| 2015-07-09T13:57:00.510-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157133497100 }, { host: "bs-osx108-8:31200", id: 1805212955247 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 69056 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 3840 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 979ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.512-0400 m31200| 2015-07-09T13:57:00.510-0400 I NETWORK [conn94] end connection 127.0.0.1:63083 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.512-0400 m31200| 2015-07-09T13:57:00.512-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.513-0400 m31200| 2015-07-09T13:57:00.512-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156923020936 }, { host: "bs-osx108-8:31200", id: 1804086473348 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 26533, W: 15295 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 8365 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1053ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.516-0400 m31200| 2015-07-09T13:57:00.514-0400 I NETWORK [conn92] end connection 127.0.0.1:63079 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.569-0400 m31202| 2015-07-09T13:57:00.568-0400 I COMMAND [repl writer worker 14] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.601-0400 m31202| 2015-07-09T13:57:00.601-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.639-0400 m31201| 2015-07-09T13:57:00.638-0400 I COMMAND [repl writer worker 9] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.655-0400 m31202| 2015-07-09T13:57:00.654-0400 I COMMAND [repl writer worker 9] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.663-0400 m31202| 2015-07-09T13:57:00.662-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.672-0400 m31201| 2015-07-09T13:57:00.671-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.689-0400 m31201| 2015-07-09T13:57:00.686-0400 I COMMAND [repl writer worker 10] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.698-0400 m31202| 2015-07-09T13:57:00.692-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.727-0400 m31201| 2015-07-09T13:57:00.719-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.739-0400 m31201| 2015-07-09T13:57:00.738-0400 I COMMAND [repl writer worker 0] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.808-0400 m31100| 2015-07-09T13:57:00.807-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63088 #92 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.814-0400 m31200| 2015-07-09T13:57:00.814-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63089 #97 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.876-0400 m31100| 2015-07-09T13:57:00.876-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63090 #93 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.882-0400 m31200| 2015-07-09T13:57:00.879-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63091 #98 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.910-0400 m31100| 2015-07-09T13:57:00.909-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63092 #94 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:00.915-0400 m31200| 2015-07-09T13:57:00.914-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63093 #99 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.299-0400 m31200| 2015-07-09T13:57:01.298-0400 I WRITE [conn52] insert db24.tmp.agg_out.11 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { w: 1, W: 2 }, timeAcquiringMicros: { w: 22986, W: 25943 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 341ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.299-0400 m31200| 2015-07-09T13:57:01.298-0400 I WRITE [conn41] insert db24.system.indexes keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 11, w: 3 } }, Database: { acquireCount: { r: 3, w: 1, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 26202, W: 346457 } }, Collection: { acquireCount: { r: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 341ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.302-0400 m31100| 2015-07-09T13:57:01.302-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63094 #95 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.315-0400 m31200| 2015-07-09T13:57:01.315-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63095 #100 (83 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.452-0400 m31200| 2015-07-09T13:57:01.451-0400 I WRITE [conn83] insert db24.tmp.agg_out.12 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { w: 1, W: 2 }, timeAcquiringMicros: { w: 296086, W: 16199 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 153ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.452-0400 m31200| 2015-07-09T13:57:01.451-0400 I WRITE [conn80] insert db24.system.indexes keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 11, w: 3 } }, Database: { acquireCount: { r: 3, w: 1, R: 1, W: 2 }, acquireWaitCount: { r: 2, W: 2 }, timeAcquiringMicros: { r: 8279, W: 513508 } }, Collection: { acquireCount: { r: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 491ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.461-0400 m31100| 2015-07-09T13:57:01.460-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63096 #96 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.464-0400 m31200| 2015-07-09T13:57:01.464-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63097 #101 (84 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.469-0400 m31200| 2015-07-09T13:57:01.468-0400 I COMMAND [conn52] command db24.tmp.agg_out.11 command: renameCollection { renameCollection: "db24.tmp.agg_out.11", to: "db24.coll24_out_agg_sort_1", dropTarget: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 515, w: 506, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 145980 } }, Database: { acquireCount: { r: 3, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, W: 2 }, timeAcquiringMicros: { w: 22986, W: 25943 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_query 154ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.477-0400 m31200| 2015-07-09T13:57:01.476-0400 I QUERY [conn99] getmore db24.coll24 query: { aggregate: "coll24", pipeline: [ { $match: { flag: true } }, { $sort: { rand: 1 } } ], fromRouter: true, cursor: { batchSize: 0 } } cursorid:1805270349630 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:252 reslen:3028304 locks:{ Global: { acquireCount: { r: 12 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 15676 } }, Database: { acquireCount: { r: 6 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 520380 } }, Collection: { acquireCount: { r: 6 } } } 521ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.479-0400 m31200| 2015-07-09T13:57:01.478-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.488-0400 m31100| 2015-07-09T13:57:01.488-0400 I NETWORK [conn92] end connection 127.0.0.1:63088 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.496-0400 m31200| 2015-07-09T13:57:01.496-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.497-0400 m31200| 2015-07-09T13:57:01.496-0400 I NETWORK [conn97] end connection 127.0.0.1:63089 (83 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.498-0400 m31200| 2015-07-09T13:57:01.497-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157711067761 }, { host: "bs-osx108-8:31200", id: 1805505932981 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3954, W: 145980 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, W: 2 }, timeAcquiringMicros: { w: 22986, W: 25943 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 734ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.525-0400 m31200| 2015-07-09T13:57:01.525-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.526-0400 m31200| 2015-07-09T13:57:01.526-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.527-0400 m31200| 2015-07-09T13:57:01.526-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157642295900 }, { host: "bs-osx108-8:31200", id: 1804266239948 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 4954 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { w: 1, W: 2 }, timeAcquiringMicros: { w: 296086, W: 16199 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 709ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.528-0400 m31200| 2015-07-09T13:57:01.528-0400 I NETWORK [conn98] end connection 127.0.0.1:63091 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.528-0400 m31100| 2015-07-09T13:57:01.528-0400 I NETWORK [conn93] end connection 127.0.0.1:63090 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.736-0400 m31201| 2015-07-09T13:57:01.736-0400 I COMMAND [repl writer worker 6] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.743-0400 m31202| 2015-07-09T13:57:01.743-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.746-0400 m31201| 2015-07-09T13:57:01.746-0400 I COMMAND [repl writer worker 10] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:01.755-0400 m31202| 2015-07-09T13:57:01.755-0400 I COMMAND [repl writer worker 11] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.198-0400 m31200| 2015-07-09T13:57:02.197-0400 I WRITE [conn41] insert db24.tmp.agg_out.15 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 26202, W: 346457 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 646ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.208-0400 m31200| 2015-07-09T13:57:02.207-0400 I WRITE [conn30] insert db24.tmp.agg_out.13 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 10926 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 654ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.210-0400 m31200| 2015-07-09T13:57:02.209-0400 I WRITE [conn80] insert db24.tmp.agg_out.14 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 2, W: 2 }, timeAcquiringMicros: { r: 8279, W: 513508 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 632ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.211-0400 m31200| 2015-07-09T13:57:02.210-0400 I COMMAND [conn52] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 484434 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 484ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.211-0400 m31200| 2015-07-09T13:57:02.210-0400 I COMMAND [conn83] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1016708 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 516ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.244-0400 m31201| 2015-07-09T13:57:02.242-0400 I COMMAND [repl writer worker 13] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.245-0400 m31202| 2015-07-09T13:57:02.244-0400 I COMMAND [repl writer worker 10] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.245-0400 m31200| 2015-07-09T13:57:02.245-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.249-0400 m31100| 2015-07-09T13:57:02.247-0400 I NETWORK [conn95] end connection 127.0.0.1:63094 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.255-0400 m31200| 2015-07-09T13:57:02.254-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.255-0400 m31200| 2015-07-09T13:57:02.255-0400 I NETWORK [conn100] end connection 127.0.0.1:63095 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.258-0400 m31200| 2015-07-09T13:57:02.255-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156924908111 }, { host: "bs-osx108-8:31200", id: 1805794985756 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 10741, W: 2310 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 26202, W: 346457 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1350ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.260-0400 m31202| 2015-07-09T13:57:02.260-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.264-0400 m31201| 2015-07-09T13:57:02.264-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.273-0400 m31202| 2015-07-09T13:57:02.272-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.278-0400 m31201| 2015-07-09T13:57:02.278-0400 I COMMAND [repl writer worker 0] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.303-0400 m31200| 2015-07-09T13:57:02.303-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.304-0400 m31100| 2015-07-09T13:57:02.304-0400 I NETWORK [conn96] end connection 127.0.0.1:63096 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.305-0400 m31200| 2015-07-09T13:57:02.304-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.306-0400 m31100| 2015-07-09T13:57:02.305-0400 I NETWORK [conn94] end connection 127.0.0.1:63092 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.311-0400 m31200| 2015-07-09T13:57:02.309-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.311-0400 m31200| 2015-07-09T13:57:02.310-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2158166966251 }, { host: "bs-osx108-8:31200", id: 1805590049548 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4740, W: 2532 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 3, W: 2 }, timeAcquiringMicros: { r: 66518, W: 513508 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1415ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.311-0400 m31200| 2015-07-09T13:57:02.310-0400 I NETWORK [conn101] end connection 127.0.0.1:63097 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.317-0400 m31200| 2015-07-09T13:57:02.316-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.318-0400 m31200| 2015-07-09T13:57:02.317-0400 I NETWORK [conn99] end connection 127.0.0.1:63093 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.320-0400 m31200| 2015-07-09T13:57:02.318-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157853859596 }, { host: "bs-osx108-8:31200", id: 1805270349630 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 9034 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, W: 2 }, timeAcquiringMicros: { r: 38497, W: 10926 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1445ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.329-0400 m31100| 2015-07-09T13:57:02.328-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63098 #97 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.332-0400 m31100| 2015-07-09T13:57:02.329-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63099 #98 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.338-0400 m31200| 2015-07-09T13:57:02.337-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63100 #102 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:02.353-0400 m31200| 2015-07-09T13:57:02.353-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63101 #103 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.062-0400 m31200| 2015-07-09T13:57:03.061-0400 I WRITE [conn52] insert db24.tmp.agg_out.17 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 28018 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 484434, W: 54746 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 671ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.090-0400 m31200| 2015-07-09T13:57:03.089-0400 I WRITE [conn83] insert db24.tmp.agg_out.16 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 27382 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 27934, R: 1016708, W: 23896 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 654ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.090-0400 m31200| 2015-07-09T13:57:03.090-0400 I COMMAND [conn80] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 495558 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 496ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.091-0400 m31200| 2015-07-09T13:57:03.090-0400 I COMMAND [conn41] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1031827 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 532ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.091-0400 m31200| 2015-07-09T13:57:03.090-0400 I COMMAND [conn30] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 488962 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 489ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.114-0400 m31202| 2015-07-09T13:57:03.111-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.119-0400 m31201| 2015-07-09T13:57:03.117-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.128-0400 m31202| 2015-07-09T13:57:03.126-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.135-0400 m31201| 2015-07-09T13:57:03.135-0400 I COMMAND [repl writer worker 1] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.170-0400 m31200| 2015-07-09T13:57:03.170-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.172-0400 m31100| 2015-07-09T13:57:03.172-0400 I NETWORK [conn97] end connection 127.0.0.1:63098 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.179-0400 m31200| 2015-07-09T13:57:03.178-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.180-0400 m31100| 2015-07-09T13:57:03.179-0400 I NETWORK [conn98] end connection 127.0.0.1:63099 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.183-0400 m31200| 2015-07-09T13:57:03.182-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.183-0400 m31200| 2015-07-09T13:57:03.183-0400 I NETWORK [conn102] end connection 127.0.0.1:63100 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.184-0400 m31200| 2015-07-09T13:57:03.183-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156689999883 }, { host: "bs-osx108-8:31200", id: 1804950219253 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7136, w: 28018, W: 14284 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 59093, R: 484434, W: 54746 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1458ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.188-0400 m31100| 2015-07-09T13:57:03.187-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63102 #99 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.189-0400 m31200| 2015-07-09T13:57:03.188-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.190-0400 m31100| 2015-07-09T13:57:03.189-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63103 #100 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.191-0400 m31200| 2015-07-09T13:57:03.190-0400 I NETWORK [conn103] end connection 127.0.0.1:63101 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.192-0400 m31200| 2015-07-09T13:57:03.191-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157882104766 }, { host: "bs-osx108-8:31200", id: 1804534626610 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 27382, W: 5429 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 71291, R: 1016708, W: 23896 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1496ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.192-0400 m31100| 2015-07-09T13:57:03.192-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63104 #101 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.208-0400 m31200| 2015-07-09T13:57:03.205-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63105 #104 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.218-0400 m31200| 2015-07-09T13:57:03.216-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63106 #105 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:03.220-0400 m31200| 2015-07-09T13:57:03.218-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63107 #106 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.286-0400 m31200| 2015-07-09T13:57:04.282-0400 I WRITE [conn30] insert db24.tmp.agg_out.20 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 18882 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 488962, W: 44440 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1005ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.311-0400 m31200| 2015-07-09T13:57:04.310-0400 I WRITE [conn41] insert db24.tmp.agg_out.18 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19323 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 18002, R: 1031827, W: 26072 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1021ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.323-0400 m31200| 2015-07-09T13:57:04.322-0400 I WRITE [conn80] insert db24.tmp.agg_out.19 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19457 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 34005, R: 495558, W: 4350 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1013ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.324-0400 m31200| 2015-07-09T13:57:04.323-0400 I COMMAND [conn52] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1320149 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 819ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.324-0400 m31200| 2015-07-09T13:57:04.323-0400 I COMMAND [conn83] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1395207 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 896ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.361-0400 m31201| 2015-07-09T13:57:04.358-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.362-0400 m31202| 2015-07-09T13:57:04.358-0400 I COMMAND [repl writer worker 11] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.374-0400 m31201| 2015-07-09T13:57:04.374-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.380-0400 m31202| 2015-07-09T13:57:04.379-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.387-0400 m31202| 2015-07-09T13:57:04.386-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.394-0400 m31201| 2015-07-09T13:57:04.394-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.419-0400 m31200| 2015-07-09T13:57:04.412-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.425-0400 m31100| 2015-07-09T13:57:04.420-0400 I NETWORK [conn99] end connection 127.0.0.1:63102 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.428-0400 m31200| 2015-07-09T13:57:04.427-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.428-0400 m31100| 2015-07-09T13:57:04.427-0400 I NETWORK [conn101] end connection 127.0.0.1:63104 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.436-0400 m31200| 2015-07-09T13:57:04.435-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.436-0400 m31200| 2015-07-09T13:57:04.436-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.437-0400 m31100| 2015-07-09T13:57:04.435-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63108 #102 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.437-0400 m31100| 2015-07-09T13:57:04.436-0400 I NETWORK [conn100] end connection 127.0.0.1:63103 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.438-0400 m31200| 2015-07-09T13:57:04.436-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156426528893 }, { host: "bs-osx108-8:31200", id: 1804096643709 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 16282, w: 19323, W: 8788 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 72388, R: 1031827, W: 26072 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1878ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.438-0400 m31200| 2015-07-09T13:57:04.436-0400 I NETWORK [conn104] end connection 127.0.0.1:63105 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.442-0400 m31100| 2015-07-09T13:57:04.441-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63109 #103 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.449-0400 m31200| 2015-07-09T13:57:04.449-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.452-0400 m31200| 2015-07-09T13:57:04.450-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156092335166 }, { host: "bs-osx108-8:31200", id: 1805084899891 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 18882, W: 37128 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 50412, R: 488962, W: 44440 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1849ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.452-0400 m31200| 2015-07-09T13:57:04.450-0400 I NETWORK [conn106] end connection 127.0.0.1:63107 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.453-0400 m31200| 2015-07-09T13:57:04.451-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.453-0400 m31200| 2015-07-09T13:57:04.451-0400 I NETWORK [conn105] end connection 127.0.0.1:63106 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.454-0400 m31200| 2015-07-09T13:57:04.451-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156158429486 }, { host: "bs-osx108-8:31200", id: 1805675834141 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 1608, w: 19457, W: 6543 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 97561, R: 495558, W: 4350 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1857ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.455-0400 m31200| 2015-07-09T13:57:04.453-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63110 #107 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.459-0400 m31200| 2015-07-09T13:57:04.458-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63111 #108 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:04.475-0400 m30999| 2015-07-09T13:57:04.474-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:04.471-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.439-0400 m31200| 2015-07-09T13:57:05.438-0400 I WRITE [conn83] insert db24.tmp.agg_out.21 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33277 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 1395207, W: 37225 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 786ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.481-0400 m31200| 2015-07-09T13:57:05.480-0400 I WRITE [conn52] insert db24.tmp.agg_out.22 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33524 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 28410, R: 1320149, W: 9880 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 976ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.482-0400 m31200| 2015-07-09T13:57:05.480-0400 I COMMAND [conn30] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1153705 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 652ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.482-0400 m31200| 2015-07-09T13:57:05.480-0400 I COMMAND [conn41] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1149741 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 648ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.482-0400 m31200| 2015-07-09T13:57:05.480-0400 I COMMAND [conn80] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1093811 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 592ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.522-0400 m31202| 2015-07-09T13:57:05.522-0400 I COMMAND [repl writer worker 0] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.525-0400 m31201| 2015-07-09T13:57:05.525-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.537-0400 m31202| 2015-07-09T13:57:05.537-0400 I COMMAND [repl writer worker 9] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.546-0400 m31201| 2015-07-09T13:57:05.546-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.568-0400 m31200| 2015-07-09T13:57:05.568-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.573-0400 m31100| 2015-07-09T13:57:05.572-0400 I NETWORK [conn103] end connection 127.0.0.1:63109 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.584-0400 m31200| 2015-07-09T13:57:05.584-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.586-0400 m31100| 2015-07-09T13:57:05.585-0400 I NETWORK [conn102] end connection 127.0.0.1:63108 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.588-0400 m31200| 2015-07-09T13:57:05.587-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.589-0400 m31200| 2015-07-09T13:57:05.588-0400 I NETWORK [conn107] end connection 127.0.0.1:63110 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.591-0400 m31200| 2015-07-09T13:57:05.589-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156088017885 }, { host: "bs-osx108-8:31200", id: 1804282761341 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 2912, w: 33277, W: 28410 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 56173, R: 1395207, W: 37225 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 2162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.597-0400 m31200| 2015-07-09T13:57:05.595-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.597-0400 m31200| 2015-07-09T13:57:05.595-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156326529068 }, { host: "bs-osx108-8:31200", id: 1805058579832 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33524, W: 8054 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 68541, R: 1320149, W: 9880 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 2092ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.598-0400 m31200| 2015-07-09T13:57:05.596-0400 I NETWORK [conn108] end connection 127.0.0.1:63111 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.615-0400 m31200| 2015-07-09T13:57:05.614-0400 I COMMAND [conn80] command db24.tmp.agg_out.25 command: create { create: "tmp.agg_out.25", temp: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 8, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 31130 } }, Database: { acquireCount: { r: 2, w: 1, R: 1, W: 1 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 1093811, W: 70908 } }, Collection: { acquireCount: { r: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 132ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.640-0400 m31100| 2015-07-09T13:57:05.634-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63112 #104 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.641-0400 m31100| 2015-07-09T13:57:05.641-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63113 #105 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.643-0400 m31200| 2015-07-09T13:57:05.641-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63114 #109 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.645-0400 m31100| 2015-07-09T13:57:05.645-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63115 #106 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.651-0400 m31200| 2015-07-09T13:57:05.651-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63116 #110 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.676-0400 m31200| 2015-07-09T13:57:05.668-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63117 #111 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:05.894-0400 m30998| 2015-07-09T13:57:05.894-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:05.885-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.316-0400 m31200| 2015-07-09T13:57:06.315-0400 I WRITE [conn30] insert db24.tmp.agg_out.23 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 31247 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 23515, R: 1153705, W: 55539 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 610ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.548-0400 m31200| 2015-07-09T13:57:06.331-0400 I WRITE [conn41] insert db24.tmp.agg_out.24 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 31179 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 47038, R: 1149741, W: 43438 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 565ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.549-0400 m31200| 2015-07-09T13:57:06.335-0400 I WRITE [conn80] insert db24.tmp.agg_out.25 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 31130 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 11350, R: 1093811, W: 71151 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 595ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.549-0400 m31200| 2015-07-09T13:57:06.336-0400 I COMMAND [conn83] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 386562 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 387ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.550-0400 m31200| 2015-07-09T13:57:06.336-0400 I COMMAND [conn52] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 437334 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 437ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.550-0400 m31202| 2015-07-09T13:57:06.369-0400 I COMMAND [repl writer worker 2] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.550-0400 m31201| 2015-07-09T13:57:06.371-0400 I COMMAND [repl writer worker 10] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.550-0400 m31202| 2015-07-09T13:57:06.381-0400 I COMMAND [repl writer worker 0] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.551-0400 m31201| 2015-07-09T13:57:06.385-0400 I COMMAND [repl writer worker 1] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.551-0400 m31201| 2015-07-09T13:57:06.392-0400 I COMMAND [repl writer worker 3] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.551-0400 m31202| 2015-07-09T13:57:06.395-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.551-0400 m31200| 2015-07-09T13:57:06.398-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.551-0400 m31100| 2015-07-09T13:57:06.398-0400 I NETWORK [conn106] end connection 127.0.0.1:63115 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.551-0400 m31200| 2015-07-09T13:57:06.408-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.552-0400 m31200| 2015-07-09T13:57:06.408-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157979980576 }, { host: "bs-osx108-8:31200", id: 1804044112508 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 11189, w: 31179, W: 481 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 85390, R: 1149741, W: 43438 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1576ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.552-0400 m31200| 2015-07-09T13:57:06.408-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.552-0400 m31200| 2015-07-09T13:57:06.408-0400 I NETWORK [conn111] end connection 127.0.0.1:63117 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.552-0400 m31100| 2015-07-09T13:57:06.409-0400 I NETWORK [conn104] end connection 127.0.0.1:63112 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.552-0400 m31100| 2015-07-09T13:57:06.409-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63119 #107 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.553-0400 m31100| 2015-07-09T13:57:06.410-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63120 #108 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.553-0400 m31200| 2015-07-09T13:57:06.410-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.553-0400 m31100| 2015-07-09T13:57:06.412-0400
I NETWORK [conn105] end connection 127.0.0.1:63113 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.553-0400 m31200| 2015-07-09T13:57:06.420-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.553-0400 m31200| 2015-07-09T13:57:06.420-0400 I NETWORK [conn110] end connection 127.0.0.1:63116 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.553-0400 m31200| 2015-07-09T13:57:06.420-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.553-0400 m31200| 2015-07-09T13:57:06.420-0400 I NETWORK [conn109] end connection 127.0.0.1:63114 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.554-0400 m31200| 2015-07-09T13:57:06.420-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157971479452 }, { host: "bs-osx108-8:31200", id: 1805266756681 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 5590, w: 31130, W: 5664 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 49688, R: 1093811, W: 71151 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1533ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.554-0400 m31200| 2015-07-09T13:57:06.420-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157153161017 }, { host: "bs-osx108-8:31200", id: 1804111633324 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 31247, W: 26147 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 54523, R: 1153705, W: 55539 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1593ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.555-0400 m31200| 2015-07-09T13:57:06.422-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63121 #112 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.555-0400 m31200| 2015-07-09T13:57:06.423-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63122 #113 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:06.797-0400 m31100| 2015-07-09T13:57:06.797-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:06.796-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 
2015-07-09T13:57:07.184-0400 m31200| 2015-07-09T13:57:07.184-0400 I WRITE [conn83] insert db24.tmp.agg_out.27 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 23175 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 22561, R: 386562, W: 4986 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 705ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.219-0400 m31200| 2015-07-09T13:57:07.212-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:07.200-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.238-0400 m31200| 2015-07-09T13:57:07.238-0400 I WRITE [conn52] insert db24.tmp.agg_out.26 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 22519 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 437334, W: 17163 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 724ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.240-0400 m31200| 2015-07-09T13:57:07.238-0400 I COMMAND [conn30] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 416850 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 417ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.240-0400 m31200| 2015-07-09T13:57:07.238-0400 I COMMAND [conn80] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 408469 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 408ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.240-0400 m31200| 2015-07-09T13:57:07.239-0400 I COMMAND [conn41] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 390884 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 391ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.267-0400 m31201| 2015-07-09T13:57:07.266-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.279-0400 m31202| 2015-07-09T13:57:07.278-0400 I COMMAND [repl writer worker 6] CMD: drop db24.coll24_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.287-0400 m31201| 
2015-07-09T13:57:07.285-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.303-0400 m31202| 2015-07-09T13:57:07.303-0400 I COMMAND [repl writer worker 1] CMD: drop db24.coll24_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.339-0400 m31200| 2015-07-09T13:57:07.338-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.345-0400 m31100| 2015-07-09T13:57:07.344-0400 I NETWORK [conn107] end connection 127.0.0.1:63119 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.346-0400 m31200| 2015-07-09T13:57:07.346-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.347-0400 m31200| 2015-07-09T13:57:07.347-0400 I NETWORK [conn112] end connection 127.0.0.1:63121 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.350-0400 m31200| 2015-07-09T13:57:07.347-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156363078095 }, { host: "bs-osx108-8:31200", id: 1804070775287 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7059, w: 23175, W: 19949 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 98214, R: 386562, W: 4986 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1398ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.351-0400 m31100| 2015-07-09T13:57:07.351-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63123 #109 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.352-0400 m31100| 2015-07-09T13:57:07.352-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63124 #110 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.353-0400 m31200| 2015-07-09T13:57:07.352-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.364-0400 m31100| 2015-07-09T13:57:07.352-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63125 #111 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.364-0400 m31100| 2015-07-09T13:57:07.362-0400 I NETWORK [conn108] end connection 127.0.0.1:63120 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.365-0400 m31200| 2015-07-09T13:57:07.362-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.367-0400 m31200| 2015-07-09T13:57:07.365-0400 I NETWORK [conn113] end connection 127.0.0.1:63122 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.368-0400 m31200| 2015-07-09T13:57:07.365-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { 
$mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157735570761 }, { host: "bs-osx108-8:31200", id: 1804096156163 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 22519, W: 4783 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 58881, R: 437334, W: 17163 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1467ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.368-0400 m31200| 2015-07-09T13:57:07.367-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63126 #114 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.368-0400 m31200| 2015-07-09T13:57:07.367-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63127 #115 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:07.373-0400 m31200| 2015-07-09T13:57:07.372-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63128 #116 (82 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.037-0400 m31200| 2015-07-09T13:57:08.036-0400 I WRITE [conn41] insert db24.tmp.agg_out.30 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 23720 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 390884, W: 65680 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 575ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.051-0400 m31200| 2015-07-09T13:57:08.050-0400 I WRITE [conn80] insert db24.tmp.agg_out.29 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 23982 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 46646, R: 408469, W: 9834 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 585ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.055-0400 m31200| 2015-07-09T13:57:08.055-0400 I WRITE [conn30] insert db24.tmp.agg_out.28 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 24167 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 18533, R: 416850, W: 41446 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 524ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.056-0400 m31200| 2015-07-09T13:57:08.055-0400 I COMMAND [conn83] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, 
acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1008496 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 507ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.056-0400 m31200| 2015-07-09T13:57:08.056-0400 I COMMAND [conn52] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 475892 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 476ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.089-0400 m31202| 2015-07-09T13:57:08.088-0400 I COMMAND [repl writer worker 2] CMD: drop db24.coll24_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.089-0400 m31201| 2015-07-09T13:57:08.089-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.102-0400 m31202| 2015-07-09T13:57:08.102-0400 I COMMAND [repl writer worker 3] CMD: drop db24.coll24_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.109-0400 m31201| 2015-07-09T13:57:08.109-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.116-0400 m31202| 2015-07-09T13:57:08.115-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.124-0400 m31201| 2015-07-09T13:57:08.124-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.137-0400 m31200| 2015-07-09T13:57:08.135-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.145-0400 m31100| 2015-07-09T13:57:08.137-0400 I NETWORK [conn109] end connection 127.0.0.1:63123 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.147-0400 m31200| 2015-07-09T13:57:08.146-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.147-0400 m31100| 2015-07-09T13:57:08.147-0400 I NETWORK [conn111] end connection 127.0.0.1:63125 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.149-0400 m31200| 2015-07-09T13:57:08.149-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.152-0400 m31200| 2015-07-09T13:57:08.149-0400 I NETWORK [conn114] end connection 127.0.0.1:63126 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.152-0400 m31200| 2015-07-09T13:57:08.149-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156159634292 }, { host: "bs-osx108-8:31200", id: 1805447085858 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 9785, w: 23982, W: 549 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, 
acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 94463, R: 408469, W: 9834 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1319ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.153-0400 m31200| 2015-07-09T13:57:08.151-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.153-0400 m31100| 2015-07-09T13:57:08.151-0400 I NETWORK [conn110] end connection 127.0.0.1:63124 (75 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.154-0400 m31100| 2015-07-09T13:57:08.154-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63129 #112 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.156-0400 m31100| 2015-07-09T13:57:08.155-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63130 #113 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.158-0400 m31200| 2015-07-09T13:57:08.157-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.159-0400 m31200| 2015-07-09T13:57:08.158-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156717686560 }, { host: "bs-osx108-8:31200", id: 1805569940022 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3026, w: 24167, W: 5790 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 68777, R: 416850, W: 41446 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1337ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.159-0400 m31200| 2015-07-09T13:57:08.158-0400 I NETWORK [conn116] end connection 127.0.0.1:63128 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.160-0400 m31200| 2015-07-09T13:57:08.160-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.162-0400 m31200| 2015-07-09T13:57:08.160-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63131 #117 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.162-0400 m31200| 2015-07-09T13:57:08.161-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157924724039 }, { host: "bs-osx108-8:31200", id: 1805611069662 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 23720, W: 28241 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 51373, R: 390884, W: 65680 
} }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1313ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.170-0400 m31200| 2015-07-09T13:57:08.165-0400 I NETWORK [conn115] end connection 127.0.0.1:63127 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.172-0400 m31200| 2015-07-09T13:57:08.171-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63132 #118 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.732-0400 m31200| 2015-07-09T13:57:08.731-0400 I WRITE [conn83] insert db24.tmp.agg_out.31 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 31635 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 1008496, W: 32570 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 499ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.755-0400 m31200| 2015-07-09T13:57:08.755-0400 I WRITE [conn52] insert db24.tmp.agg_out.32 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 30856 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 23582, R: 475892, W: 3064 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 499ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.756-0400 m31200| 2015-07-09T13:57:08.755-0400 I COMMAND [conn41] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 281540 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 281ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.756-0400 m31200| 2015-07-09T13:57:08.755-0400 I COMMAND [conn80] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 288577 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 288ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.761-0400 m31200| 2015-07-09T13:57:08.757-0400 I COMMAND [conn30] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 275769 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 278ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.778-0400 m31201| 2015-07-09T13:57:08.777-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:57:08.778-0400 m31202| 2015-07-09T13:57:08.778-0400 I COMMAND [repl writer worker 13] CMD: drop db24.coll24_out_agg_sort_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.788-0400 m31201| 2015-07-09T13:57:08.787-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.792-0400 m31202| 2015-07-09T13:57:08.792-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.853-0400 m31200| 2015-07-09T13:57:08.853-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.854-0400 m31100| 2015-07-09T13:57:08.853-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63133 #114 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.855-0400 m31100| 2015-07-09T13:57:08.853-0400 I NETWORK [conn112] end connection 127.0.0.1:63129 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.855-0400 m31200| 2015-07-09T13:57:08.853-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.861-0400 m31200| 2015-07-09T13:57:08.859-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.862-0400 m31100| 2015-07-09T13:57:08.860-0400 I NETWORK [conn113] end connection 127.0.0.1:63130 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.863-0400 m31100| 2015-07-09T13:57:08.860-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63134 #115 (77 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.867-0400 m31200| 2015-07-09T13:57:08.860-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156117765058 }, { host: "bs-osx108-8:31200", id: 1805292059531 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 30856, W: 6139 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 85096, R: 475892, W: 3064 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1280ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.867-0400 m31200| 2015-07-09T13:57:08.860-0400 I NETWORK [conn117] end connection 127.0.0.1:63131 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.868-0400 m31200| 2015-07-09T13:57:08.862-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.869-0400 m31200| 2015-07-09T13:57:08.864-0400 I NETWORK [conn118] end connection 127.0.0.1:63132 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.869-0400 m31200| 2015-07-09T13:57:08.864-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63136 #119 (80 connections now open) 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.869-0400 m31100| 2015-07-09T13:57:08.864-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63135 #116 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.880-0400 m31200| 2015-07-09T13:57:08.867-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156648269528 }, { host: "bs-osx108-8:31200", id: 1804454972840 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 6521, w: 31635, W: 14702 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 67879, R: 1008496, W: 32570 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1321ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.881-0400 m31200| 2015-07-09T13:57:08.867-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63137 #120 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:08.883-0400 m31200| 2015-07-09T13:57:08.882-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63138 #121 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:09.972-0400 m31200| 2015-07-09T13:57:09.971-0400 I WRITE [conn80] insert db24.tmp.agg_out.33 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19811 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 24995, R: 288577, W: 28321 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 975ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:09.984-0400 m31200| 2015-07-09T13:57:09.981-0400 I WRITE [conn30] insert db24.tmp.agg_out.35 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 17083 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 275769, W: 47273 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1063ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.000-0400 m31200| 2015-07-09T13:57:10.000-0400 I WRITE [conn41] insert db24.tmp.agg_out.34 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 20119 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 45434, R: 281540, W: 63 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1060ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.001-0400 m31200| 2015-07-09T13:57:10.000-0400 I COMMAND [conn83] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1377439 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 876ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.001-0400 m31200| 2015-07-09T13:57:10.000-0400 I COMMAND [conn52] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1337578 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 837ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.036-0400 m31202| 2015-07-09T13:57:10.035-0400 I COMMAND [repl writer worker 3] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.039-0400 m31201| 2015-07-09T13:57:10.039-0400 I COMMAND [repl writer worker 6] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.050-0400 m31202| 2015-07-09T13:57:10.050-0400 I COMMAND [repl writer worker 1] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.057-0400 m31201| 2015-07-09T13:57:10.056-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.065-0400 m31202| 2015-07-09T13:57:10.064-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.072-0400 m31201| 2015-07-09T13:57:10.072-0400 I COMMAND [repl writer worker 13] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.089-0400 m31200| 2015-07-09T13:57:10.089-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.089-0400 m31100| 2015-07-09T13:57:10.089-0400 I NETWORK [conn114] end connection 127.0.0.1:63133 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.094-0400 m31100| 2015-07-09T13:57:10.093-0400 I NETWORK [conn116] end connection 127.0.0.1:63135 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.094-0400 m31100| 2015-07-09T13:57:10.093-0400 I NETWORK [conn115] end connection 127.0.0.1:63134 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.094-0400 m31200| 2015-07-09T13:57:10.090-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.095-0400 m31200| 2015-07-09T13:57:10.093-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.103-0400 m31100| 2015-07-09T13:57:10.103-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63139 #117 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.103-0400 m31200| 2015-07-09T13:57:10.103-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.104-0400 m31200| 2015-07-09T13:57:10.103-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156337382371 }, { host: "bs-osx108-8:31200", id: 1805738364764 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 2757, w: 20119, W: 2300 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 92726, R: 281540, W: 63 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1630ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.105-0400 m31200| 2015-07-09T13:57:10.104-0400 I NETWORK [conn119] end connection 127.0.0.1:63136 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.107-0400 m31200| 2015-07-09T13:57:10.107-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.109-0400 m31200| 2015-07-09T13:57:10.108-0400 I NETWORK [conn120] end connection 127.0.0.1:63137 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.110-0400 m31200| 2015-07-09T13:57:10.108-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.110-0400 m31100| 2015-07-09T13:57:10.108-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63140 #118 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.111-0400 m31200| 2015-07-09T13:57:10.108-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157355855237 }, { host: "bs-osx108-8:31200", id: 1804653750870 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 14693, w: 17083, W: 6268 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 47586, R: 275769, W: 47273 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1629ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.111-0400 m31200| 2015-07-09T13:57:10.108-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63141 #122 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.111-0400 m31200| 2015-07-09T13:57:10.108-0400 I NETWORK [conn121] end connection 127.0.0.1:63138 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.112-0400 m31200| 2015-07-09T13:57:10.108-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156448974074 }, { host: "bs-osx108-8:31200", id: 1805863520059 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 19811, W: 36447 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 73266, R: 288577, W: 28321 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1642ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.112-0400 m31200| 2015-07-09T13:57:10.111-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63142 #123 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.831-0400 m31200| 2015-07-09T13:57:10.830-0400 I WRITE [conn52] insert db24.tmp.agg_out.37 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 28890 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 27839, R: 1337578, W: 960 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 661ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.867-0400 m31200| 2015-07-09T13:57:10.866-0400 I WRITE [conn83] insert db24.tmp.agg_out.36 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 28888 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 1377439, W: 28508 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 665ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.867-0400 m31200| 2015-07-09T13:57:10.866-0400 I COMMAND [conn80] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1040236 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 539ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.868-0400 m31200| 2015-07-09T13:57:10.866-0400 I COMMAND [conn30] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 417931 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 418ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.868-0400 m31200| 2015-07-09T13:57:10.866-0400 I COMMAND [conn41] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 456105 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 456ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.894-0400 m31201| 2015-07-09T13:57:10.892-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.895-0400 m31202| 2015-07-09T13:57:10.895-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.908-0400 m31201| 2015-07-09T13:57:10.908-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.914-0400 m31202| 2015-07-09T13:57:10.913-0400 I COMMAND [repl writer worker 9] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.957-0400 m31200| 2015-07-09T13:57:10.956-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.957-0400 m31100| 2015-07-09T13:57:10.957-0400 I NETWORK [conn118] end connection 127.0.0.1:63140 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.972-0400 m31200| 2015-07-09T13:57:10.972-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.974-0400 m31100| 2015-07-09T13:57:10.971-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63143 #119 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.974-0400 m31100| 2015-07-09T13:57:10.974-0400 I NETWORK [conn117] end connection 127.0.0.1:63139 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.975-0400 m31100| 2015-07-09T13:57:10.974-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63144 #120 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.975-0400 m31100| 2015-07-09T13:57:10.974-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63145 #121 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.981-0400 m31200| 2015-07-09T13:57:10.979-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.986-0400 m31200| 2015-07-09T13:57:10.980-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157022842499 }, { host: "bs-osx108-8:31200", id: 1804024118369 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 5677, w: 28890, W: 13242 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 93922, R: 1337578, W: 960 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1817ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.987-0400 m31200| 2015-07-09T13:57:10.982-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.987-0400 m31200| 2015-07-09T13:57:10.984-0400 I NETWORK [conn122] end connection 127.0.0.1:63141 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.988-0400 m31200| 2015-07-09T13:57:10.984-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63146 #124 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.988-0400 m31200| 2015-07-09T13:57:10.985-0400 I NETWORK [conn123] end connection 127.0.0.1:63142 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.988-0400 m31200| 2015-07-09T13:57:10.987-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156538513845 }, { host: "bs-osx108-8:31200", id: 1804123167684 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 28888, W: 6193 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 54802, R: 1377439, W: 28508 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1864ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:10.995-0400 m31200| 2015-07-09T13:57:10.993-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63147 #125 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.001-0400 m31200| 2015-07-09T13:57:10.998-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63148 #126 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.824-0400 m31200| 2015-07-09T13:57:11.823-0400 I WRITE [conn41] insert db24.tmp.agg_out.39 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 23613 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 456105, W: 46204 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 773ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.852-0400 m31200| 2015-07-09T13:57:11.852-0400 I WRITE [conn80] insert db24.tmp.agg_out.38 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 24424 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 36908, R: 1040236, W: 1171 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 745ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.858-0400 m31200| 2015-07-09T13:57:11.858-0400 I WRITE [conn30] insert db24.tmp.agg_out.40 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 24095 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 20197, R: 417931, W: 28383 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 765ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.859-0400 m31200| 2015-07-09T13:57:11.858-0400 I COMMAND [conn83] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1149722 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 649ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.859-0400 m31200| 2015-07-09T13:57:11.859-0400 I COMMAND [conn52] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1146700 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 647ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.891-0400 m31201| 2015-07-09T13:57:11.887-0400 I COMMAND [repl writer worker 9] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.891-0400 m31202| 2015-07-09T13:57:11.890-0400 I COMMAND [repl writer worker 11] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.900-0400 m31201| 2015-07-09T13:57:11.899-0400 I COMMAND [repl writer worker 6] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.912-0400 m31202| 2015-07-09T13:57:11.911-0400 I COMMAND [repl writer worker 2] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.915-0400 m31201| 2015-07-09T13:57:11.915-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.921-0400 m31202| 2015-07-09T13:57:11.921-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.945-0400 m31200| 2015-07-09T13:57:11.936-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.946-0400 m31100| 2015-07-09T13:57:11.945-0400 I NETWORK [conn119] end connection 127.0.0.1:63143 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.950-0400 m31200| 2015-07-09T13:57:11.948-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.950-0400 m31200| 2015-07-09T13:57:11.948-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.950-0400 m31100| 2015-07-09T13:57:11.949-0400 I NETWORK [conn121] end connection 127.0.0.1:63145 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.950-0400 m31200| 2015-07-09T13:57:11.949-0400 I NETWORK [conn124] end connection 127.0.0.1:63146 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.956-0400 m31100| 2015-07-09T13:57:11.952-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63149 #122 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.956-0400 m31200| 2015-07-09T13:57:11.951-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157833056356 }, { host: "bs-osx108-8:31200", id: 1804998321809 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 9963, w: 24424, W: 976 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 86368, R: 1040236, W: 1171 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1623ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.957-0400 m31200| 2015-07-09T13:57:11.951-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.957-0400 m31100| 2015-07-09T13:57:11.955-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63150 #123 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.960-0400 m31100| 2015-07-09T13:57:11.956-0400 I NETWORK [conn120] end connection 127.0.0.1:63144 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.961-0400 m31200| 2015-07-09T13:57:11.961-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63151 #127 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.962-0400 m31200| 2015-07-09T13:57:11.961-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63152 #128 (83 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.964-0400 m31200| 2015-07-09T13:57:11.962-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.964-0400 m31200| 2015-07-09T13:57:11.964-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.965-0400 m31200| 2015-07-09T13:57:11.964-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156290296717 }, { host: "bs-osx108-8:31200", id: 1804601554548 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 23613, W: 39884 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 41699, R: 456105, W: 46204 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1553ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.965-0400 m31200| 2015-07-09T13:57:11.964-0400 I NETWORK [conn125] end connection 127.0.0.1:63147 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.968-0400 m31200| 2015-07-09T13:57:11.967-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156719104657 }, { host: "bs-osx108-8:31200", id: 1803922406149 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 6562, w: 24095, W: 4714 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 69080, R: 417931, W: 28383 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1519ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:11.970-0400 m31200| 2015-07-09T13:57:11.969-0400 I NETWORK [conn126] end connection 127.0.0.1:63148 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.149-0400 m31200| 2015-07-09T13:57:12.146-0400 I QUERY [conn56] getmore db24.coll24 cursorid:1805803351014 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:350 reslen:4205970 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 116ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.624-0400 m31200| 2015-07-09T13:57:12.623-0400 I WRITE [conn52] insert db24.tmp.agg_out.42 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 27088 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 1146700, W: 33426 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 595ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.648-0400 m31200| 2015-07-09T13:57:12.647-0400 I WRITE [conn83] insert db24.tmp.agg_out.41 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 27238 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 34556, R: 1149722, W: 1451 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 569ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.649-0400 m31200| 2015-07-09T13:57:12.647-0400 I COMMAND [conn80] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 342360 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 342ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.650-0400 m31200| 2015-07-09T13:57:12.647-0400 I COMMAND [conn30] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 259211 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 259ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.655-0400 m31200| 2015-07-09T13:57:12.647-0400 I COMMAND [conn41] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 279292 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 279ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.670-0400 m31202| 2015-07-09T13:57:12.669-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.671-0400 m31201| 2015-07-09T13:57:12.671-0400 I COMMAND [repl writer worker 10] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.680-0400 m31201| 2015-07-09T13:57:12.680-0400 I COMMAND [repl writer worker 2] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.686-0400 m31202| 2015-07-09T13:57:12.685-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.688-0400 m31200| 2015-07-09T13:57:12.688-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.688-0400 m31100| 2015-07-09T13:57:12.688-0400 I NETWORK [conn122] end connection 127.0.0.1:63149 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.700-0400 m31200| 2015-07-09T13:57:12.697-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.701-0400 m31200| 2015-07-09T13:57:12.697-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156137694967 }, { host: "bs-osx108-8:31200", id: 1804766759047 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 27238, W: 4452 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 52033, R: 1149722, W: 1451 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1488ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.701-0400 m31200| 2015-07-09T13:57:12.698-0400 I NETWORK [conn128] end connection 127.0.0.1:63152 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.701-0400 m31200| 2015-07-09T13:57:12.699-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.701-0400 m31100| 2015-07-09T13:57:12.700-0400 I NETWORK [conn123] end connection 127.0.0.1:63150 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.703-0400 m31200| 2015-07-09T13:57:12.702-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.703-0400 m31200| 2015-07-09T13:57:12.703-0400 I NETWORK [conn127] end connection 127.0.0.1:63151 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.709-0400 m31200| 2015-07-09T13:57:12.703-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156431556982 }, { host: "bs-osx108-8:31200", id: 1805148667911 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 8888, w: 27088, W: 17537 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 21021, R: 1146700, W: 33426 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1491ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.757-0400 m31100| 2015-07-09T13:57:12.756-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63153 #124 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.761-0400 m31100| 2015-07-09T13:57:12.761-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63154 #125 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.761-0400 m31100| 2015-07-09T13:57:12.761-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63155 #126 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.785-0400 m31200| 2015-07-09T13:57:12.784-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63156 #129 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.786-0400 m31200| 2015-07-09T13:57:12.786-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63157 #130 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:12.798-0400 m31200| 2015-07-09T13:57:12.797-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63158 #131 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.820-0400 m31200| 2015-07-09T13:57:13.819-0400 I WRITE [conn80] insert db24.tmp.agg_out.43 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19550 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 52557, R: 342360, W: 12400 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 948ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.863-0400 m31200| 2015-07-09T13:57:13.862-0400 I WRITE [conn41] insert db24.tmp.agg_out.44 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 18844 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 279292, W: 60531 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 962ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.875-0400 m31200| 2015-07-09T13:57:13.874-0400 I WRITE [conn30] insert db24.tmp.agg_out.45 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 19450 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 21100, R: 259211, W: 31976 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 997ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.875-0400 m31200| 2015-07-09T13:57:13.874-0400 I COMMAND [conn52] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_1" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1366663 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 866ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.876-0400 m31200| 2015-07-09T13:57:13.874-0400 I COMMAND [conn83] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_4" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 1394269 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 893ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.924-0400 m31201| 2015-07-09T13:57:13.922-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.927-0400 m31202| 2015-07-09T13:57:13.922-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.937-0400 m31202| 2015-07-09T13:57:13.936-0400 I COMMAND [repl writer worker 10] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.943-0400 m31201| 2015-07-09T13:57:13.943-0400 I COMMAND [repl writer worker 0] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.946-0400 m31202| 2015-07-09T13:57:13.946-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.954-0400 m31201| 2015-07-09T13:57:13.953-0400 I COMMAND [repl writer worker 6] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.982-0400 m31200| 2015-07-09T13:57:13.981-0400 I COMMAND [conn83] command db24.tmp.agg_out.46 command: create { create: "tmp.agg_out.46", temp: true } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:97 locks:{ Global: { acquireCount: { r: 8, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33612 } }, Database: { acquireCount: { r: 2, w: 1, R: 1, W: 1 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 1394269, W: 41823 } }, Collection: { acquireCount: { r: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.982-0400 m31200| 2015-07-09T13:57:13.981-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.983-0400 m31100| 2015-07-09T13:57:13.983-0400 I NETWORK [conn124] end connection 127.0.0.1:63153 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.996-0400 m31200| 2015-07-09T13:57:13.992-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.997-0400 m31100| 2015-07-09T13:57:13.996-0400 I NETWORK [conn126] end connection 127.0.0.1:63155 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:13.999-0400 m31200| 2015-07-09T13:57:13.998-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.000-0400 m31200| 2015-07-09T13:57:13.998-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157002044157 }, { host: "bs-osx108-8:31200", id: 1804739390344 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 19550, W: 54519 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 1 }, timeAcquiringMicros: { r: 119151, R: 342360, W: 12400 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1693ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.000-0400 m31200| 2015-07-09T13:57:13.999-0400 I NETWORK [conn129] end connection 127.0.0.1:63156 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.000-0400 m31200| 2015-07-09T13:57:14.000-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.001-0400 m31100| 2015-07-09T13:57:14.000-0400 I NETWORK [conn125] end connection 127.0.0.1:63154 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.006-0400 m31100| 2015-07-09T13:57:14.005-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63159 #127 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.013-0400 m31100| 2015-07-09T13:57:14.012-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63160 #128 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.016-0400 m31200| 2015-07-09T13:57:14.006-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.017-0400 m31200| 2015-07-09T13:57:14.007-0400 I NETWORK [conn131] end connection 127.0.0.1:63158 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.017-0400 m31200| 2015-07-09T13:57:14.007-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157085271630 }, { host: "bs-osx108-8:31200", id: 1803985790128 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 16163, w: 18844, W: 5706 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 2 }, timeAcquiringMicros: { r: 72802, R: 279292, W: 60531 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1638ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.017-0400 m31200| 2015-07-09T13:57:14.016-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63161 #132 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.018-0400 m31200| 2015-07-09T13:57:14.018-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63162 #133 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.025-0400 m31200| 2015-07-09T13:57:14.020-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.025-0400 m31200| 2015-07-09T13:57:14.021-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156310355226 }, { host: "bs-osx108-8:31200", id: 1804024954131 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 4877, w: 19450, W: 4383 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 2, R: 1, W: 2 }, timeAcquiringMicros: { r: 96177, R: 259211, W: 31976 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1632ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.025-0400 m31200| 2015-07-09T13:57:14.023-0400 I NETWORK [conn130] end connection 127.0.0.1:63157 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.744-0400 m31200| 2015-07-09T13:57:14.743-0400 I WRITE [conn83] insert db24.tmp.agg_out.46 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33612 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 1394269, W: 55464 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 649ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.755-0400 m31200| 2015-07-09T13:57:14.754-0400 I WRITE [conn52] insert db24.tmp.agg_out.47 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 33736 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 32879, R: 1366663, W: 9033 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 671ms
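The $mergeCursors stage in these commands is not something the test wrote; mongos adds it internally when it targets the merge half of a sharded pipeline at a single shard. Each shard sorts its own documents, and the merging shard combines the presorted streams ($mergePresorted: true) and performs the $out, which is also why the tmp.agg_out.NN inserts and the final rename all happen on m31200. Assuming the workload issues a plain sort-then-out pipeline, the user-facing command would look roughly like this sketch (names copied from the log):

    // What a worker would run against mongos (a sketch; the "_0" output
    // suffix is taken from the log, everything else is an assumption):
    db.getSiblingDB("db24").coll24.aggregate([
        { $sort: { rand: 1 } },            // each shard sorts its portion
        { $out: "coll24_out_agg_sort_0" }  // merging shard writes the result
    ]);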
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.756-0400 m31200| 2015-07-09T13:57:14.755-0400 I COMMAND [conn41] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_0" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 427350 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 427ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.756-0400 m31200| 2015-07-09T13:57:14.755-0400 I COMMAND [conn30] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_2" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 363175 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 364ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.757-0400 m31200| 2015-07-09T13:57:14.756-0400 I COMMAND [conn80] command db24.$cmd command: listCollections { listCollections: 1, filter: { name: "coll24_out_agg_sort_3" }, cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:227 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 2, R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 360608 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_query 361ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.818-0400 m31200| 2015-07-09T13:57:14.816-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.819-0400 m31100| 2015-07-09T13:57:14.816-0400 I NETWORK [conn128] end connection 127.0.0.1:63160 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.824-0400 m31200| 2015-07-09T13:57:14.824-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.824-0400 m31200| 2015-07-09T13:57:14.824-0400 I NETWORK [conn52] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.826-0400 m31200| 2015-07-09T13:57:14.825-0400 I COMMAND [conn52] command db24.coll24_out_agg_sort_1 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2157119457173 }, { host: "bs-osx108-8:31200", id: 1805893070907 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_1" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 6412, w: 33736, W: 35390 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { r: 1, R: 1, W: 1 }, timeAcquiringMicros: { r: 32879, R: 1366663, W: 9033 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1816ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.827-0400 m31100| 2015-07-09T13:57:14.826-0400 I NETWORK [conn127] end connection 127.0.0.1:63159 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.827-0400 m31100| 2015-07-09T13:57:14.827-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63164 #129 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.830-0400 m31200| 2015-07-09T13:57:14.830-0400 I NETWORK [conn133] end connection 127.0.0.1:63162 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.831-0400 m31100| 2015-07-09T13:57:14.830-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63165 #130 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.832-0400 m31100| 2015-07-09T13:57:14.831-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63166 #131 (78 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.832-0400 m31200| 2015-07-09T13:57:14.832-0400 I NETWORK [conn83] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.833-0400 m31200| 2015-07-09T13:57:14.832-0400 I COMMAND [conn83] command db24.coll24_out_agg_sort_4 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156766313387 }, { host: "bs-osx108-8:31200", id: 1804549907661 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_4" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33612, W: 52735 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 2 }, timeAcquiringMicros: { R: 1394269, W: 55464 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1850ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.834-0400 m31200| 2015-07-09T13:57:14.832-0400 I NETWORK [conn132] end connection 127.0.0.1:63161 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.834-0400 m31200| 2015-07-09T13:57:14.833-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63167 #134 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.842-0400 m31200| 2015-07-09T13:57:14.840-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63168 #135 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.851-0400 m31200| 2015-07-09T13:57:14.850-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63169 #136 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.964-0400 m31202| 2015-07-09T13:57:14.952-0400 I COMMAND [repl writer worker 2] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:14.998-0400 m31202| 2015-07-09T13:57:14.998-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:15.026-0400 m31201| 2015-07-09T13:57:15.025-0400 I COMMAND [repl writer worker 13] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:15.054-0400 m31201| 2015-07-09T13:57:15.054-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:15.202-0400 m30999| 2015-07-09T13:57:15.201-0400 I NETWORK [conn152] end connection 127.0.0.1:63061 (3 connections now open)
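The repeated create/drop cycle over coll24_out_agg_sort_0 through _4 is the concurrency suite's FSM pattern: each of five worker threads loops through a state that re-runs the aggregation into its own tid-suffixed collection, and $out implicitly drops and replaces the previous round's output. A schematic of the workload file format used by jstests/concurrency (the field names follow the fsm_workloads convention; the concrete state body and counts below are assumptions, not a copy of agg_sort.js):

    // Sketch of an FSM workload definition (jstests/concurrency convention).
    var $config = (function() {
        var states = {
            query: function query(db, collName) {
                // each thread writes to its own output collection
                var outName = collName + '_out_agg_sort_' + this.tid;
                db[collName].aggregate([
                    { $sort: { rand: 1 } },
                    { $out: outName }
                ]);
            }
        };
        var transitions = { query: { query: 1 } }; // keep looping on the same state
        return {
            threadCount: 5,   // would match the five output collections in the log
            iterations: 10,
            states: states,
            transitions: transitions
        };
    })();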
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:15.327-0400 m30998| 2015-07-09T13:57:15.327-0400 I NETWORK [conn152] end connection 127.0.0.1:63063 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.303-0400 m31200| 2015-07-09T13:57:16.301-0400 I WRITE [conn30] insert db24.tmp.agg_out.49 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 28925 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 363175, W: 18477 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1323ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.317-0400 m31200| 2015-07-09T13:57:16.316-0400 I WRITE [conn80] insert db24.tmp.agg_out.50 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 14844 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 360608, W: 30723 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1358ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.338-0400 m31200| 2015-07-09T13:57:16.338-0400 I WRITE [conn41] insert db24.tmp.agg_out.48 ninserted:500 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 512, w: 504 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 41386 } }, Database: { acquireCount: { r: 3, w: 502, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 427350, W: 293 } }, Collection: { acquireCount: { r: 3, w: 1 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } 1351ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.365-0400 m31200| 2015-07-09T13:57:16.364-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.366-0400 m31100| 2015-07-09T13:57:16.366-0400 I NETWORK [conn129] end connection 127.0.0.1:63164 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.384-0400 m31200| 2015-07-09T13:57:16.376-0400 I NETWORK [conn80] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.384-0400 m31200| 2015-07-09T13:57:16.376-0400 I COMMAND [conn80] command db24.coll24_out_agg_sort_3 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156489866252 }, { host: "bs-osx108-8:31200", id: 1804522284016 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_3" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 18195, W: 4078 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 360608, W: 30723 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 1982ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.385-0400 m31200| 2015-07-09T13:57:16.385-0400 I NETWORK [conn134] end connection 127.0.0.1:63167 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.386-0400 m31200| 2015-07-09T13:57:16.385-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.387-0400 m31201| 2015-07-09T13:57:16.381-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.389-0400 m31100| 2015-07-09T13:57:16.389-0400 I NETWORK [conn131] end connection 127.0.0.1:63166 (76 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.389-0400 m31202| 2015-07-09T13:57:16.389-0400 I COMMAND [repl writer worker 5] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.390-0400 m31200| 2015-07-09T13:57:16.390-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31100 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.390-0400 m31200| 2015-07-09T13:57:16.390-0400 I NETWORK [conn30] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.391-0400 m31100| 2015-07-09T13:57:16.391-0400 I NETWORK [conn130] end connection 127.0.0.1:63165 (75 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.392-0400 m31200| 2015-07-09T13:57:16.391-0400 I NETWORK [conn135] end connection 127.0.0.1:63168 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.393-0400 m31200| 2015-07-09T13:57:16.393-0400 I COMMAND [conn30] command db24.coll24_out_agg_sort_2 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156301280479 }, { host: "bs-osx108-8:31200", id: 1805852465677 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_2" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 40887, W: 3043 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 363175, W: 18477 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 2001ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.396-0400 m31201| 2015-07-09T13:57:16.396-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.399-0400 m31200| 2015-07-09T13:57:16.398-0400 I NETWORK [conn41] scoped connection to bs-osx108-8:31200 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.399-0400 m31200| 2015-07-09T13:57:16.399-0400 I NETWORK [conn136] end connection 127.0.0.1:63169 (79 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.402-0400 m31200| 2015-07-09T13:57:16.400-0400 I COMMAND [conn41] command db24.coll24_out_agg_sort_0 command: aggregate { aggregate: "coll24", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31100", id: 2156892041119 }, { host: "bs-osx108-8:31200", id: 1805330986171 } ] }, { $sort: { rand: 1, $mergePresorted: true } }, { $out: "coll24_out_agg_sort_0" } ], cursor: {} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:144 locks:{ Global: { acquireCount: { r: 517, w: 506, W: 1 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 41386, W: 2649 } }, Database: { acquireCount: { r: 4, w: 504, R: 1, W: 2 }, acquireWaitCount: { R: 1, W: 1 }, timeAcquiringMicros: { R: 427350, W: 293 } }, Collection: { acquireCount: { r: 4, w: 1 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 2072ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.409-0400 m31202| 2015-07-09T13:57:16.407-0400 I COMMAND [repl writer worker 2] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.418-0400 m31201| 2015-07-09T13:57:16.417-0400 I COMMAND [repl writer worker 6] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.430-0400 m31202| 2015-07-09T13:57:16.429-0400 I COMMAND [repl writer worker 6] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:16.853-0400 m30999| 2015-07-09T13:57:16.852-0400 I NETWORK [conn153] end connection 127.0.0.1:63062 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.052-0400 m30998| 2015-07-09T13:57:17.052-0400 I NETWORK [conn153] end connection 127.0.0.1:63064 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.080-0400 m30999| 2015-07-09T13:57:17.080-0400 I NETWORK [conn154] end connection 127.0.0.1:63065 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.137-0400 m30999| 2015-07-09T13:57:17.136-0400 I COMMAND [conn1] DROP: db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.137-0400 m30999| 2015-07-09T13:57:17.136-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:17.136-0400-559eb5fdca4787b9985d1c81", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464637136), what: "dropCollection.start", ns: "db24.coll24", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.195-0400 m30999| 2015-07-09T13:57:17.194-0400 I SHARDING [conn1] distributed lock 'db24.coll24/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5fdca4787b9985d1c82
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.196-0400 m31100| 2015-07-09T13:57:17.196-0400 I COMMAND [conn38] CMD: drop db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.207-0400 m31200| 2015-07-09T13:57:17.207-0400 I COMMAND [conn64] CMD: drop db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.223-0400 m31202| 2015-07-09T13:57:17.222-0400 I COMMAND [repl writer worker 15] CMD: drop db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.223-0400 m31201| 2015-07-09T13:57:17.223-0400 I COMMAND [repl writer worker 1] CMD: drop db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.272-0400 m31100| 2015-07-09T13:57:17.271-0400 I SHARDING [conn38] remotely refreshing metadata for db24.coll24 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb5e8ca4787b9985d1c7f, current metadata version is 2|3||559eb5e8ca4787b9985d1c7f
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.274-0400 m31100| 2015-07-09T13:57:17.273-0400 W SHARDING [conn38] no chunks found when reloading db24.coll24, previous version was 0|0||559eb5e8ca4787b9985d1c7f, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.274-0400 m31100| 2015-07-09T13:57:17.273-0400 I SHARDING [conn38] dropping metadata for db24.coll24 at shard version 2|3||559eb5e8ca4787b9985d1c7f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.275-0400 m31200| 2015-07-09T13:57:17.275-0400 I SHARDING [conn64] remotely refreshing metadata for db24.coll24 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb5e8ca4787b9985d1c7f, current metadata version is 2|5||559eb5e8ca4787b9985d1c7f
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.277-0400 m31200| 2015-07-09T13:57:17.277-0400 W SHARDING [conn64] no chunks found when reloading db24.coll24, previous version was 0|0||559eb5e8ca4787b9985d1c7f, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.278-0400 m31200| 2015-07-09T13:57:17.277-0400 I SHARDING [conn64] dropping metadata for db24.coll24 at shard version 2|5||559eb5e8ca4787b9985d1c7f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.279-0400 m30999| 2015-07-09T13:57:17.278-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:17.278-0400-559eb5fdca4787b9985d1c83", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464637278), what: "dropCollection", ns: "db24.coll24", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.333-0400 m30999| 2015-07-09T13:57:17.332-0400 I SHARDING [conn1] distributed lock 'db24.coll24/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.390-0400 m30999| 2015-07-09T13:57:17.389-0400 I COMMAND [conn1] DROP: db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.390-0400 m30999| 2015-07-09T13:57:17.389-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.390-0400 m31200| 2015-07-09T13:57:17.389-0400 I COMMAND [conn41] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.393-0400 m30999| 2015-07-09T13:57:17.393-0400 I COMMAND [conn1] DROP: db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.393-0400 m30999| 2015-07-09T13:57:17.393-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.394-0400 m31200| 2015-07-09T13:57:17.394-0400 I COMMAND [conn41] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.398-0400 m31200| 2015-07-09T13:57:17.397-0400 I COMMAND [conn41] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.399-0400 m30999| 2015-07-09T13:57:17.397-0400 I COMMAND [conn1] DROP: db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.399-0400 m30999| 2015-07-09T13:57:17.397-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.401-0400 m30999| 2015-07-09T13:57:17.401-0400 I COMMAND [conn1] DROP: db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.401-0400 m30999| 2015-07-09T13:57:17.401-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.402-0400 m31200| 2015-07-09T13:57:17.401-0400 I COMMAND [conn41] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.404-0400 m30999| 2015-07-09T13:57:17.404-0400 I COMMAND [conn1] DROP: db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.404-0400 m30999| 2015-07-09T13:57:17.404-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.404-0400 m31202| 2015-07-09T13:57:17.404-0400 I COMMAND [repl writer worker 13] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.406-0400 m31200| 2015-07-09T13:57:17.405-0400 I COMMAND [conn41] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.408-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.409-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.409-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.409-0400 jstests/concurrency/fsm_workloads/agg_sort.js: Workload completed in 19071 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.409-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.409-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.409-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.410-0400 m30999| 2015-07-09T13:57:17.410-0400 I COMMAND [conn1] DROP: db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.410-0400 m30999| 2015-07-09T13:57:17.410-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.411-0400 m31200| 2015-07-09T13:57:17.411-0400 I COMMAND [conn41] CMD: drop db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.413-0400 m31202| 2015-07-09T13:57:17.412-0400 I COMMAND [repl writer worker 10] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.414-0400 m31201| 2015-07-09T13:57:17.414-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.414-0400 m31202| 2015-07-09T13:57:17.414-0400 I COMMAND [repl writer worker 12] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.415-0400 m30999| 2015-07-09T13:57:17.415-0400 I COMMAND [conn1] DROP DATABASE: db24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.416-0400 m30999| 2015-07-09T13:57:17.415-0400 I SHARDING [conn1] DBConfig::dropDatabase: db24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.416-0400 m30999| 2015-07-09T13:57:17.415-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:17.415-0400-559eb5fdca4787b9985d1c84", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464637415), what: "dropDatabase.start", ns: "db24", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.417-0400 m31202| 2015-07-09T13:57:17.416-0400 I COMMAND [repl writer worker 7] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.417-0400 m31202| 2015-07-09T13:57:17.417-0400 I COMMAND [repl writer worker 4] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.421-0400 m31201| 2015-07-09T13:57:17.420-0400 I COMMAND [repl writer worker 11] CMD: drop db24.coll24_out_agg_sort_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.422-0400 m31201| 2015-07-09T13:57:17.422-0400 I COMMAND [repl writer worker 2] CMD: drop db24.coll24_out_agg_sort_2
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.423-0400 m31201| 2015-07-09T13:57:17.423-0400 I COMMAND [repl writer worker 8] CMD: drop db24.coll24_out_agg_sort_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.425-0400 m31201| 2015-07-09T13:57:17.424-0400 I COMMAND [repl writer worker 3] CMD: drop db24.coll24_out_agg_sort_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.525-0400 m30999| 2015-07-09T13:57:17.524-0400 I SHARDING [conn1] DBConfig::dropDatabase: db24 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.525-0400 m31200| 2015-07-09T13:57:17.525-0400 I COMMAND [conn66] dropDatabase db24 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.525-0400 m31200| 2015-07-09T13:57:17.525-0400 I COMMAND [conn66] dropDatabase db24 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.526-0400 m30999| 2015-07-09T13:57:17.525-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:17.525-0400-559eb5fdca4787b9985d1c85", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464637525), what: "dropDatabase", ns: "db24", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.527-0400 m31201| 2015-07-09T13:57:17.526-0400 I COMMAND [repl writer worker 9] dropDatabase db24 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.527-0400 m31201| 2015-07-09T13:57:17.526-0400 I COMMAND [repl writer worker 9] dropDatabase db24 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.528-0400 m31202| 2015-07-09T13:57:17.527-0400 I COMMAND [repl writer worker 8] dropDatabase db24 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.528-0400 m31202| 2015-07-09T13:57:17.527-0400 I COMMAND [repl writer worker 8] dropDatabase db24 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.599-0400 m31101| 2015-07-09T13:57:17.599-0400 I COMMAND [repl writer worker 0] CMD: drop db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.600-0400 m31102| 2015-07-09T13:57:17.599-0400 I COMMAND [repl writer worker 0] CMD: drop db24.coll24
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.625-0400 m31100| 2015-07-09T13:57:17.625-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.632-0400 m31102| 2015-07-09T13:57:17.629-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.632-0400 m31101| 2015-07-09T13:57:17.629-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.674-0400 m31200| 2015-07-09T13:57:17.673-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.675-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.675-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.676-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.676-0400 jstests/concurrency/fsm_workloads/collmod_separate_collections.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.676-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.676-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.676-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.684-0400 m30999| 2015-07-09T13:57:17.683-0400 I SHARDING [conn1] distributed lock 'db25/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5fdca4787b9985d1c86
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.689-0400 m31202| 2015-07-09T13:57:17.687-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.689-0400 m31201| 2015-07-09T13:57:17.688-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.693-0400 m30999| 2015-07-09T13:57:17.693-0400 I SHARDING [conn1] Placing [db25] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.693-0400 m30999| 2015-07-09T13:57:17.693-0400 I SHARDING [conn1] Enabling sharding for database [db25] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.748-0400 m30999| 2015-07-09T13:57:17.748-0400 I SHARDING [conn1] distributed lock 'db25/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.781-0400 m31100| 2015-07-09T13:57:17.781-0400 I INDEX [conn68] build index on: db25.coll25 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.coll25" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.781-0400 m31100| 2015-07-09T13:57:17.781-0400 I INDEX [conn68] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.804-0400 m31100| 2015-07-09T13:57:17.794-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.804-0400 m30999| 2015-07-09T13:57:17.799-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db25.coll25", key: { createdAt: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.804-0400 m30999| 2015-07-09T13:57:17.803-0400 I SHARDING [conn1] distributed lock 'db25.coll25/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb5fdca4787b9985d1c87
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.806-0400 m30999| 2015-07-09T13:57:17.805-0400 I SHARDING [conn1] enable sharding on: db25.coll25 with shard key: { createdAt: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.807-0400 m30999| 2015-07-09T13:57:17.805-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:17.805-0400-559eb5fdca4787b9985d1c88", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464637805), what: "shardCollection.start", ns: "db25.coll25", details: { shardKey: { createdAt: 1.0 }, collection: "db25.coll25", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.817-0400 m31101| 2015-07-09T13:57:17.816-0400 I INDEX [repl writer worker 4] build index on: db25.coll25 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.coll25" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.817-0400 m31101| 2015-07-09T13:57:17.816-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.821-0400 m31102| 2015-07-09T13:57:17.820-0400 I INDEX [repl writer worker 1] build index on: db25.coll25 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.coll25" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.822-0400 m31102| 2015-07-09T13:57:17.820-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.827-0400 m31101| 2015-07-09T13:57:17.826-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.830-0400 m31102| 2015-07-09T13:57:17.830-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.861-0400 m30999| 2015-07-09T13:57:17.861-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db25.coll25 using new epoch 559eb5fdca4787b9985d1c89
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.916-0400 m30999| 2015-07-09T13:57:17.915-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db25.coll25: 0ms sequenceNumber: 115 version: 1|0||559eb5fdca4787b9985d1c89 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.972-0400 m30999| 2015-07-09T13:57:17.972-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db25.coll25: 0ms sequenceNumber: 116 version: 1|0||559eb5fdca4787b9985d1c89 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.975-0400 m31100| 2015-07-09T13:57:17.974-0400 I SHARDING [conn52] remotely refreshing metadata for db25.coll25 with requested shard version 1|0||559eb5fdca4787b9985d1c89, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.977-0400 m31100| 2015-07-09T13:57:17.976-0400 I SHARDING [conn52] collection db25.coll25 was previously unsharded, new metadata loaded with shard version 1|0||559eb5fdca4787b9985d1c89
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.977-0400 m31100| 2015-07-09T13:57:17.976-0400 I SHARDING [conn52] collection version was loaded at version 1|0||559eb5fdca4787b9985d1c89, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:17.977-0400 m30999| 2015-07-09T13:57:17.977-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:17.977-0400-559eb5fdca4787b9985d1c8a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464637977), what: "shardCollection", ns: "db25.coll25", details: { version: "1|0||559eb5fdca4787b9985d1c89" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.032-0400 m30999| 2015-07-09T13:57:18.031-0400 I SHARDING [conn1] distributed lock 'db25.coll25/bs-osx108-8:30999:1436464534:16807' unlocked.
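The db25 setup just logged (enable sharding, build the createdAt_1 index on the primary shard, then shardcollection through mongos) corresponds to the usual shell steps. A sketch using the standard helpers, with names taken from the log; the harness drives this through its own wrappers, so the exact calls below are an assumption:

    // Shell equivalent of the setup sequence logged above.
    sh.enableSharding("db25");
    db.getSiblingDB("db25").coll25.createIndex({ createdAt: 1 }); // shard key needs a supporting index
    sh.shardCollection("db25.coll25", { createdAt: 1 });

The secondaries (m31101, m31102) then build the same index via their repl writer workers, which is what the mirrored INDEX lines show.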
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.032-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.194-0400 m30998| 2015-07-09T13:57:18.194-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63171 #154 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.195-0400 m30999| 2015-07-09T13:57:18.195-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63173 #155 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.195-0400 m30998| 2015-07-09T13:57:18.195-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63172 #155 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.196-0400 m30999| 2015-07-09T13:57:18.195-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63174 #156 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.199-0400 m30999| 2015-07-09T13:57:18.199-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63175 #157 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.200-0400 m30999| 2015-07-09T13:57:18.200-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63176 #158 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.201-0400 m30998| 2015-07-09T13:57:18.200-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63177 #156 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.207-0400 m30998| 2015-07-09T13:57:18.207-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63178 #157 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.207-0400 m30998| 2015-07-09T13:57:18.207-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63179 #158 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.208-0400 m30999| 2015-07-09T13:57:18.208-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63180 #159 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.217-0400 setting random seed: 9349722843617
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.217-0400 setting random seed: 791905848309
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.218-0400 setting random seed: 3166739372536
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.218-0400 setting random seed: 9561598729342
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.227-0400 setting random seed: 9463280159980
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.228-0400 setting random seed: 3388659004122
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.234-0400 setting random seed: 1963435667566
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.238-0400 setting random seed: 1500682062469
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.248-0400 setting random seed: 1789918886497
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.250-0400 setting random seed: 4634467819705
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.252-0400 m30998| 2015-07-09T13:57:18.251-0400 I SHARDING [conn154] ChunkManager: time to load chunks for db25.coll25: 0ms sequenceNumber: 29 version: 1|0||559eb5fdca4787b9985d1c89 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.397-0400 m31100| 2015-07-09T13:57:18.396-0400 I WRITE [conn27] insert db25.collmod_separate_collections_6 query: { _id: ObjectId('559eb5feeac5440bf8d22ebc'), createdAt: new Date(1436464638220) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 17532, W: 61301 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.432-0400 m31100| 2015-07-09T13:57:18.430-0400 I WRITE [conn31] insert db25.collmod_separate_collections_2 query: { _id: ObjectId('559eb5feeac5440bf8d23bc0'), createdAt: new Date(1436464638238) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 93199 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 127ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.469-0400 m31100| 2015-07-09T13:57:18.468-0400 I WRITE [conn70] insert db25.collmod_separate_collections_1 query: { _id: ObjectId('559eb5feeac5440bf8d23bbf'), createdAt: new Date(1436464638240) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 5876, W: 127278 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 170ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.495-0400 m31100| 2015-07-09T13:57:18.494-0400 I WRITE [conn30] insert db25.collmod_separate_collections_9 query: { _id: ObjectId('559eb5feeac5440bf8d242aa'), createdAt: new Date(1436464638230) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 2492, W: 164716 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 193ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.522-0400 m31100| 2015-07-09T13:57:18.520-0400 I WRITE [conn67] insert db25.collmod_separate_collections_4 query: { _id: ObjectId('559eb5feeac5440bf8d22576'), createdAt: new Date(1436464638219) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 40984, W: 190948 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 258ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.550-0400 m31100| 2015-07-09T13:57:18.549-0400 I WRITE [conn24] insert db25.collmod_separate_collections_0 query: { _id: ObjectId('559eb5feeac5440bf8d23064'), createdAt: new Date(1436464638252) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 2442, W: 216814 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 247ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.551-0400 m31100| 2015-07-09T13:57:18.550-0400 I WRITE
[conn27] insert db25.collmod_separate_collections_6 query: { _id: ObjectId('559eb5feeac5440bf8d22ebd'), createdAt: new Date(1436464638221) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 170085, W: 61301 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.552-0400 m31100| 2015-07-09T13:57:18.551-0400 I WRITE [conn16] insert db25.collmod_separate_collections_5 query: { _id: ObjectId('559eb5feeac5440bf8d22d48'), createdAt: new Date(1436464638220) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 8, w: 8 } }, Database: { acquireCount: { w: 7, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 243658, W: 128 } }, Collection: { acquireCount: { w: 3, W: 1 } }, Metadata: { acquireCount: { w: 4 } }, oplog: { acquireCount: { w: 4 } } } 185ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.554-0400 m31100| 2015-07-09T13:57:18.552-0400 I WRITE [conn68] insert db25.collmod_separate_collections_3 query: { _id: ObjectId('559eb5feeac5440bf8d22960'), createdAt: new Date(1436464638231) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 8, w: 8 } }, Database: { acquireCount: { w: 7, W: 1 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 247602 } }, Collection: { acquireCount: { w: 3, W: 1 } }, Metadata: { acquireCount: { w: 4 } }, oplog: { acquireCount: { w: 4 } } } 187ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.554-0400 m31100| 2015-07-09T13:57:18.552-0400 I WRITE [conn26] insert db25.collmod_separate_collections_8 query: { _id: ObjectId('559eb5feeac5440bf8d23666'), createdAt: new Date(1436464638223) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 199831, W: 25768 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 187ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.555-0400 m31100| 2015-07-09T13:57:18.554-0400 I WRITE [conn31] insert db25.collmod_separate_collections_2 query: { _id: ObjectId('559eb5feeac5440bf8d23bc1'), createdAt: new Date(1436464638240) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 123137, W: 93199 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:18.581-0400 m31100| 2015-07-09T13:57:18.579-0400 I WRITE [conn22] insert db25.collmod_separate_collections_7 query: { _id: ObjectId('559eb5feeac5440bf8d2489e'), createdAt: new Date(1436464638251) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 60119, W: 190060 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 274ms [js_test:fsm_all_sharded_replication] 
2015-07-09T13:57:21.730-0400 m31100| 2015-07-09T13:57:21.729-0400 I COMMAND [conn31] command db25.$cmd command: insert { insert: "collmod_separate_collections_2", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1137, w: 1137 } }, Database: { acquireCount: { w: 1136, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 157600, W: 93199 } }, Collection: { acquireCount: { w: 135, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3426ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.738-0400 m31100| 2015-07-09T13:57:21.738-0400 I COMMAND [conn67] command db25.$cmd command: insert { insert: "collmod_separate_collections_4", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1138, w: 1138 } }, Database: { acquireCount: { w: 1137, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 100606, W: 190948 } }, Collection: { acquireCount: { w: 136, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3475ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.771-0400 m31100| 2015-07-09T13:57:21.771-0400 I INDEX [conn72] build index on: db25.collmod_separate_collections_2 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_2", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.777-0400 m31100| 2015-07-09T13:57:21.771-0400 I INDEX [conn72] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.791-0400 m31100| 2015-07-09T13:57:21.790-0400 I INDEX [conn72] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.796-0400 m31101| 2015-07-09T13:57:21.795-0400 I INDEX [repl writer worker 3] build index on: db25.collmod_separate_collections_2 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_2", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.796-0400 m31101| 2015-07-09T13:57:21.795-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.797-0400 m31100| 2015-07-09T13:57:21.795-0400 I INDEX [conn46] build index on: db25.collmod_separate_collections_4 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_4", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.797-0400 m31100| 2015-07-09T13:57:21.795-0400 I INDEX [conn46] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.808-0400 m31101| 2015-07-09T13:57:21.808-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.817-0400 m31102| 2015-07-09T13:57:21.815-0400 I INDEX [repl writer worker 6] build index on: db25.collmod_separate_collections_2 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_2", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.817-0400 m31102| 2015-07-09T13:57:21.816-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.824-0400 m31100| 2015-07-09T13:57:21.823-0400 I INDEX [conn46] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.851-0400 m31102| 2015-07-09T13:57:21.851-0400 I INDEX [repl writer worker 6] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.862-0400 m31101| 2015-07-09T13:57:21.862-0400 I INDEX [repl writer worker 8] build index on: db25.collmod_separate_collections_4 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_4", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.862-0400 m31101| 2015-07-09T13:57:21.862-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.870-0400 m31102| 2015-07-09T13:57:21.870-0400 I INDEX [repl writer worker 9] build index on: db25.collmod_separate_collections_4 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_4", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.870-0400 m31102| 2015-07-09T13:57:21.870-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.879-0400 m31101| 2015-07-09T13:57:21.879-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.903-0400 m31102| 2015-07-09T13:57:21.902-0400 I INDEX [repl writer worker 9] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.966-0400 m31100| 2015-07-09T13:57:21.965-0400 I COMMAND [conn24] command db25.$cmd command: insert { insert: "collmod_separate_collections_0", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1142, w: 1142 } }, Database: { acquireCount: { w: 1141, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 181840, W: 216814 } }, Collection: { acquireCount: { w: 140, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3664ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:21.987-0400 m31100| 2015-07-09T13:57:21.986-0400 I COMMAND [conn70] command db25.$cmd command: insert { insert: "collmod_separate_collections_1", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1147, w: 1147 } }, Database: { acquireCount: { w: 1146, W: 1 }, acquireWaitCount: { w: 11, W: 1 }, timeAcquiringMicros: { w: 210137, W: 127278 } }, Collection: { acquireCount: { w: 145, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3690ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.003-0400 m31100| 2015-07-09T13:57:22.003-0400 I INDEX [conn46] build index on: db25.collmod_separate_collections_0 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_0", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.003-0400 m31100| 2015-07-09T13:57:22.003-0400 I INDEX [conn46] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.016-0400 m31100| 2015-07-09T13:57:22.015-0400 I INDEX [conn46] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.030-0400 m31100| 2015-07-09T13:57:22.029-0400 I INDEX [conn52] build index on: db25.collmod_separate_collections_1 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_1", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.031-0400 m31100| 2015-07-09T13:57:22.029-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.039-0400 m31101| 2015-07-09T13:57:22.037-0400 I INDEX [repl writer worker 12] build index on: db25.collmod_separate_collections_0 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_0", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.039-0400 m31101| 2015-07-09T13:57:22.037-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.040-0400 m31102| 2015-07-09T13:57:22.037-0400 I INDEX [repl writer worker 8] build index on: db25.collmod_separate_collections_0 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_0", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.040-0400 m31102| 2015-07-09T13:57:22.037-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.052-0400 m31101| 2015-07-09T13:57:22.052-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.052-0400 m31100| 2015-07-09T13:57:22.052-0400 I INDEX [conn52] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.059-0400 m31102| 2015-07-09T13:57:22.058-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.066-0400 m31101| 2015-07-09T13:57:22.066-0400 I INDEX [repl writer worker 1] build index on: db25.collmod_separate_collections_1 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_1", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.067-0400 m31101| 2015-07-09T13:57:22.066-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.077-0400 m31102| 2015-07-09T13:57:22.076-0400 I INDEX [repl writer worker 14] build index on: db25.collmod_separate_collections_1 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_1", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.077-0400 m31102| 2015-07-09T13:57:22.076-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.095-0400 m31101| 2015-07-09T13:57:22.095-0400 I INDEX [repl writer worker 1] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.111-0400 m31102| 2015-07-09T13:57:22.111-0400 I INDEX [repl writer worker 14] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.127-0400 m31100| 2015-07-09T13:57:22.126-0400 I COMMAND [conn26] command db25.$cmd command: insert { insert: "collmod_separate_collections_8", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1152, w: 1152 } }, Database: { acquireCount: { w: 1151, W: 1 }, acquireWaitCount: { w: 16, W: 1 }, timeAcquiringMicros: { w: 426411, W: 25768 } }, Collection: { acquireCount: { w: 150, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3835ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.152-0400 m31100| 2015-07-09T13:57:22.152-0400 I INDEX [conn46] build index on: db25.collmod_separate_collections_8 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_8", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.153-0400 m31100| 2015-07-09T13:57:22.152-0400 I INDEX [conn46] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.169-0400 m31100| 2015-07-09T13:57:22.169-0400 I INDEX [conn46] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.178-0400 m31102| 2015-07-09T13:57:22.177-0400 I INDEX [repl writer worker 5] build index on: db25.collmod_separate_collections_8 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_8", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.179-0400 m31102| 2015-07-09T13:57:22.177-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.187-0400 m31101| 2015-07-09T13:57:22.178-0400 I INDEX [repl writer worker 15] build index on: db25.collmod_separate_collections_8 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_8", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.187-0400 m31101| 2015-07-09T13:57:22.178-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.198-0400 m31102| 2015-07-09T13:57:22.190-0400 I INDEX [repl writer worker 5] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.209-0400 m31100| 2015-07-09T13:57:22.208-0400 I COMMAND [conn30] command db25.$cmd command: insert { insert: "collmod_separate_collections_9", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1155, w: 1155 } }, Database: { acquireCount: { w: 1154, W: 1 }, acquireWaitCount: { w: 20, W: 1 }, timeAcquiringMicros: { w: 353329, W: 164716 } }, Collection: { acquireCount: { w: 153, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3907ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.209-0400 m31100| 2015-07-09T13:57:22.208-0400 I COMMAND [conn22] command db25.$cmd command: insert { insert: "collmod_separate_collections_7", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1155, w: 1155 } }, Database: { acquireCount: { w: 1154, W: 1 }, acquireWaitCount: { w: 18, W: 1 }, timeAcquiringMicros: { w: 394827, W: 190060 } }, Collection: { acquireCount: { w: 153, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3903ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.211-0400 m31101| 2015-07-09T13:57:22.211-0400 I INDEX [repl writer worker 15] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.228-0400 m31100| 2015-07-09T13:57:22.227-0400 I COMMAND [conn27] command db25.$cmd command: insert { insert: "collmod_separate_collections_6", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1157, w: 1157 } }, Database: { acquireCount: { w: 1156, W: 1 }, acquireWaitCount: { w: 20, W: 1 }, timeAcquiringMicros: { w: 527366, W: 61301 } }, Collection: { acquireCount: { w: 155, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 3942ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.266-0400 m31100| 2015-07-09T13:57:22.264-0400 I INDEX [conn52] build index on: db25.collmod_separate_collections_9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.266-0400 m31100| 2015-07-09T13:57:22.264-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.276-0400 m31100| 2015-07-09T13:57:22.275-0400 I INDEX [conn52] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.281-0400 m31102| 2015-07-09T13:57:22.281-0400 I INDEX [repl writer worker 12] build index on: db25.collmod_separate_collections_9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.282-0400 m31102| 2015-07-09T13:57:22.281-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.286-0400 m31101| 2015-07-09T13:57:22.286-0400 I INDEX [repl writer worker 15] build index on: db25.collmod_separate_collections_9 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_9", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.286-0400 m31101| 2015-07-09T13:57:22.286-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.290-0400 m31100| 2015-07-09T13:57:22.287-0400 I INDEX [conn20] build index on: db25.collmod_separate_collections_7 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_7", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.290-0400 m31100| 2015-07-09T13:57:22.287-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.296-0400 m31101| 2015-07-09T13:57:22.294-0400 I INDEX [repl writer worker 15] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.299-0400 m31102| 2015-07-09T13:57:22.299-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.305-0400 m31100| 2015-07-09T13:57:22.305-0400 I INDEX [conn20] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.317-0400 m31100| 2015-07-09T13:57:22.317-0400 I INDEX [conn46] build index on: db25.collmod_separate_collections_6 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_6", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.318-0400 m31100| 2015-07-09T13:57:22.317-0400 I INDEX [conn46] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.321-0400 m31102| 2015-07-09T13:57:22.321-0400 I INDEX [repl writer worker 3] build index on: db25.collmod_separate_collections_7 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_7", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.321-0400 m31102| 2015-07-09T13:57:22.321-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.333-0400 m31101| 2015-07-09T13:57:22.333-0400 I INDEX [repl writer worker 10] build index on: db25.collmod_separate_collections_7 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_7", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.333-0400 m31101| 2015-07-09T13:57:22.333-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.338-0400 m31102| 2015-07-09T13:57:22.338-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.340-0400 m31101| 2015-07-09T13:57:22.339-0400 I INDEX [repl writer worker 10] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.341-0400 m31100| 2015-07-09T13:57:22.341-0400 I INDEX [conn46] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.343-0400 m31100| 2015-07-09T13:57:22.342-0400 I COMMAND [conn46] command db25.$cmd command: createIndexes { createIndexes: "collmod_separate_collections_6", indexes: [ { key: { createdAt: 1.0 }, name: "createdAt_1", expireAfterSeconds: 3600.0 } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 74378 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.347-0400 m31100| 2015-07-09T13:57:22.344-0400 I COMMAND [conn32] command db25.collmod_separate_collections_2 command: collMod { collMod: "collmod_separate_collections_2", index: { keyPattern: { createdAt: 1.0 }, expireAfterSeconds: 2283.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:161 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 108983 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.347-0400 m31100| 2015-07-09T13:57:22.345-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63181 #132 (76 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.348-0400 m31100| 2015-07-09T13:57:22.347-0400 I COMMAND [conn35] command db25.collmod_separate_collections_0 command: collMod { collMod: "collmod_separate_collections_0", index: { keyPattern: { createdAt: 1.0 }, expireAfterSeconds: 234.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:161 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 112108 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.350-0400 m31100| 2015-07-09T13:57:22.349-0400 I COMMAND [conn39] command db25.collmod_separate_collections_8 command: collMod { collMod: "collmod_separate_collections_8", index: { keyPattern: { createdAt: 1.0 }, expireAfterSeconds: 3815.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:161 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 114006 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.351-0400 m31100| 2015-07-09T13:57:22.350-0400 I WRITE [conn16] insert db25.collmod_separate_collections_5 query: { _id: ObjectId('559eb5feeac5440bf8d235b0'), createdAt: new Date(1436464638241) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1156, w: 1156 } }, Database: { acquireCount: { w: 1155, W: 1 }, acquireWaitCount: { w: 23, W: 1 }, timeAcquiringMicros: { w: 744083, W: 128 } }, Collection: { acquireCount: { w: 157, W: 1 } }, Metadata: { acquireCount: { w: 998 } }, oplog: { acquireCount: { w: 998 } } } 107ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.352-0400 m31100| 2015-07-09T13:57:22.351-0400 I WRITE [conn68] insert 
db25.collmod_separate_collections_3 query: { _id: ObjectId('559eb5feeac5440bf8d22d2e'), createdAt: new Date(1436464638244) } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1136, w: 1136 } }, Database: { acquireCount: { w: 1135, W: 1 }, acquireWaitCount: { w: 22 }, timeAcquiringMicros: { w: 741226 } }, Collection: { acquireCount: { w: 157, W: 1 } }, Metadata: { acquireCount: { w: 978 } }, oplog: { acquireCount: { w: 978 } } } 107ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.355-0400 m31100| 2015-07-09T13:57:22.353-0400 I COMMAND [conn38] command db25.collmod_separate_collections_1 command: collMod { collMod: "collmod_separate_collections_1", index: { keyPattern: { createdAt: 1.0 }, expireAfterSeconds: 1209.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:161 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 106540 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.363-0400 m31100| 2015-07-09T13:57:22.363-0400 I COMMAND [conn16] command db25.$cmd command: insert { insert: "collmod_separate_collections_5", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1160, w: 1160 } }, Database: { acquireCount: { w: 1159, W: 1 }, acquireWaitCount: { w: 24, W: 1 }, timeAcquiringMicros: { w: 754515, W: 128 } }, Collection: { acquireCount: { w: 158, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 4083ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.373-0400 m31101| 2015-07-09T13:57:22.373-0400 I INDEX [repl writer worker 9] build index on: db25.collmod_separate_collections_6 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_6", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.373-0400 m31101| 2015-07-09T13:57:22.373-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.376-0400 m31100| 2015-07-09T13:57:22.372-0400 I COMMAND [conn68] command db25.$cmd command: insert { insert: "collmod_separate_collections_3", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1160, w: 1160 } }, Database: { acquireCount: { w: 1159, W: 1 }, acquireWaitCount: { w: 23 }, timeAcquiringMicros: { w: 749580 } }, Collection: { acquireCount: { w: 158, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 4113ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.383-0400 m31102| 2015-07-09T13:57:22.383-0400 I INDEX [repl writer worker 4] build index on: db25.collmod_separate_collections_6 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_6", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.383-0400 m31102| 2015-07-09T13:57:22.383-0400 I 
INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.395-0400 m31101| 2015-07-09T13:57:22.393-0400 I INDEX [repl writer worker 9] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.400-0400 m31102| 2015-07-09T13:57:22.399-0400 I INDEX [repl writer worker 4] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.419-0400 m31100| 2015-07-09T13:57:22.419-0400 I INDEX [conn20] build index on: db25.collmod_separate_collections_5 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_5", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.420-0400 m31100| 2015-07-09T13:57:22.419-0400 I INDEX [conn20] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.431-0400 m31100| 2015-07-09T13:57:22.430-0400 I INDEX [conn20] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.443-0400 m31100| 2015-07-09T13:57:22.443-0400 I INDEX [conn52] build index on: db25.collmod_separate_collections_3 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_3", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.443-0400 m31100| 2015-07-09T13:57:22.443-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.451-0400 m31101| 2015-07-09T13:57:22.450-0400 I INDEX [repl writer worker 12] build index on: db25.collmod_separate_collections_5 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_5", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.451-0400 m31101| 2015-07-09T13:57:22.450-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.462-0400 m31102| 2015-07-09T13:57:22.462-0400 I INDEX [repl writer worker 3] build index on: db25.collmod_separate_collections_5 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_5", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.463-0400 m31102| 2015-07-09T13:57:22.462-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.473-0400 m31101| 2015-07-09T13:57:22.473-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.481-0400 m31100| 2015-07-09T13:57:22.475-0400 I INDEX [conn52] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.483-0400 m31102| 2015-07-09T13:57:22.475-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.489-0400 m31101| 2015-07-09T13:57:22.488-0400 I INDEX [repl writer worker 3] build index on: db25.collmod_separate_collections_3 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_3", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.489-0400 m31101| 2015-07-09T13:57:22.488-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.497-0400 m30998| 2015-07-09T13:57:22.496-0400 I NETWORK [conn154] end connection 127.0.0.1:63171 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.504-0400 m31102| 2015-07-09T13:57:22.497-0400 I INDEX [repl writer worker 0] build index on: db25.collmod_separate_collections_3 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db25.collmod_separate_collections_3", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.504-0400 m31102| 2015-07-09T13:57:22.497-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.518-0400 m31101| 2015-07-09T13:57:22.518-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.523-0400 m30998| 2015-07-09T13:57:22.521-0400 I NETWORK [conn157] end connection 127.0.0.1:63178 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.524-0400 m31102| 2015-07-09T13:57:22.524-0400 I INDEX [repl writer worker 0] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.597-0400 m30998| 2015-07-09T13:57:22.595-0400 I NETWORK [conn158] end connection 127.0.0.1:63179 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.608-0400 m30999| 2015-07-09T13:57:22.607-0400 I NETWORK [conn157] end connection 127.0.0.1:63175 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.690-0400 m30999| 2015-07-09T13:57:22.690-0400 I NETWORK [conn158] end connection 127.0.0.1:63176 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.712-0400 m30998| 2015-07-09T13:57:22.699-0400 I NETWORK [conn156] end connection 127.0.0.1:63177 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.712-0400 m30998| 2015-07-09T13:57:22.699-0400 I NETWORK [conn155] end connection 127.0.0.1:63172 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.728-0400 m30999| 2015-07-09T13:57:22.728-0400 I NETWORK [conn156] end connection 127.0.0.1:63174 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.735-0400 m30999| 2015-07-09T13:57:22.735-0400 I NETWORK [conn159] end connection 127.0.0.1:63180 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.742-0400 m30999| 2015-07-09T13:57:22.740-0400 I NETWORK [conn155] end connection 127.0.0.1:63173 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.767-0400 m30999| 2015-07-09T13:57:22.766-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.767-0400 m30999| 2015-07-09T13:57:22.766-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.767-0400 
m31100| 2015-07-09T13:57:22.766-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.780-0400 m30999| 2015-07-09T13:57:22.778-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.780-0400 m30999| 2015-07-09T13:57:22.778-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.781-0400 m31102| 2015-07-09T13:57:22.778-0400 I COMMAND [repl writer worker 10] CMD: drop db25.collmod_separate_collections_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.781-0400 m31101| 2015-07-09T13:57:22.780-0400 I COMMAND [repl writer worker 8] CMD: drop db25.collmod_separate_collections_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.782-0400 m31100| 2015-07-09T13:57:22.779-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.785-0400 m30999| 2015-07-09T13:57:22.785-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.786-0400 m30999| 2015-07-09T13:57:22.785-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.786-0400 m31100| 2015-07-09T13:57:22.786-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.787-0400 m31101| 2015-07-09T13:57:22.787-0400 I COMMAND [repl writer worker 12] CMD: drop db25.collmod_separate_collections_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.788-0400 m31102| 2015-07-09T13:57:22.787-0400 I COMMAND [repl writer worker 3] CMD: drop db25.collmod_separate_collections_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.791-0400 m30999| 2015-07-09T13:57:22.791-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.792-0400 m30999| 2015-07-09T13:57:22.791-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.792-0400 m31101| 2015-07-09T13:57:22.791-0400 I COMMAND [repl writer worker 2] CMD: drop db25.collmod_separate_collections_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.797-0400 m31100| 2015-07-09T13:57:22.792-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.798-0400 m31102| 2015-07-09T13:57:22.795-0400 I COMMAND [repl writer worker 7] CMD: drop db25.collmod_separate_collections_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.802-0400 m30999| 2015-07-09T13:57:22.802-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.803-0400 m30999| 2015-07-09T13:57:22.802-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.803-0400 m31100| 2015-07-09T13:57:22.803-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.807-0400 m30999| 2015-07-09T13:57:22.807-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.808-0400 m30999| 2015-07-09T13:57:22.807-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.808-0400 m31100| 
2015-07-09T13:57:22.807-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.811-0400 m30999| 2015-07-09T13:57:22.811-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.811-0400 m30999| 2015-07-09T13:57:22.811-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.811-0400 m31100| 2015-07-09T13:57:22.811-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.812-0400 m31101| 2015-07-09T13:57:22.812-0400 I COMMAND [repl writer worker 3] CMD: drop db25.collmod_separate_collections_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.813-0400 m31101| 2015-07-09T13:57:22.813-0400 I COMMAND [repl writer worker 5] CMD: drop db25.collmod_separate_collections_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.829-0400 m31102| 2015-07-09T13:57:22.814-0400 I COMMAND [repl writer worker 0] CMD: drop db25.collmod_separate_collections_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.829-0400 m31102| 2015-07-09T13:57:22.815-0400 I COMMAND [repl writer worker 6] CMD: drop db25.collmod_separate_collections_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.829-0400 m31101| 2015-07-09T13:57:22.818-0400 I COMMAND [repl writer worker 1] CMD: drop db25.collmod_separate_collections_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.829-0400 m31102| 2015-07-09T13:57:22.818-0400 I COMMAND [repl writer worker 8] CMD: drop db25.collmod_separate_collections_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.830-0400 m31102| 2015-07-09T13:57:22.821-0400 I COMMAND [repl writer worker 4] CMD: drop db25.collmod_separate_collections_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.830-0400 m31101| 2015-07-09T13:57:22.821-0400 I COMMAND [repl writer worker 11] CMD: drop db25.collmod_separate_collections_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.830-0400 m31100| 2015-07-09T13:57:22.822-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.830-0400 m31100| 2015-07-09T13:57:22.826-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.830-0400 m30999| 2015-07-09T13:57:22.820-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.830-0400 m30999| 2015-07-09T13:57:22.820-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.830-0400 m30999| 2015-07-09T13:57:22.826-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.831-0400 m30999| 2015-07-09T13:57:22.826-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.831-0400 m31102| 2015-07-09T13:57:22.830-0400 I COMMAND [repl writer worker 1] CMD: drop db25.collmod_separate_collections_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.834-0400 m31101| 2015-07-09T13:57:22.831-0400 I COMMAND [repl writer worker 9] CMD: drop db25.collmod_separate_collections_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.835-0400 m30999| 2015-07-09T13:57:22.833-0400 I COMMAND [conn1] DROP: db25.collmod_separate_collections_9 
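[editor's note] Between the thread startup at 13:57:18 and the drops above, the collmod_separate_collections workload runs its full cycle: each of the 10 threads bulk-inserts 1000 { createdAt: ... } documents into its own collection (the slow I WRITE / I COMMAND entries, with Database-lock acquireWaitCount and timeAcquiringMicros climbing as the threads contend), builds a TTL index on createdAt with expireAfterSeconds: 3600 (replicated on m31101 and m31102), then retunes the TTL via collMod to a randomized value (2283, 234, 3815, 1209 above) before everything is dropped. A minimal sketch of one thread's lifecycle, reconstructed from those entries with names and values taken from the log (this is not the workload's actual source):

    var coll = db.getSiblingDB("db25").collmod_separate_collections_2;

    // Unordered batch insert of 1000 documents, matching the
    // "insert: ..., documents: 1000, ordered: false" command lines:
    var bulk = coll.initializeUnorderedBulkOp();
    for (var i = 0; i < 1000; i++) {
        bulk.insert({ createdAt: new Date() });
    }
    bulk.execute();

    // The TTL index the "build index on" lines report:
    coll.createIndex({ createdAt: 1 }, { expireAfterSeconds: 3600 });

    // Retune the expiry in place, as in the collMod COMMAND lines
    // (2283 is the randomized value this collection drew above):
    db.getSiblingDB("db25").runCommand({
        collMod: "collmod_separate_collections_2",
        index: { keyPattern: { createdAt: 1 }, expireAfterSeconds: 2283 }
    });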
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.835-0400 m30999| 2015-07-09T13:57:22.833-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.835-0400 m31100| 2015-07-09T13:57:22.833-0400 I COMMAND [conn52] CMD: drop db25.collmod_separate_collections_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.837-0400 m31102| 2015-07-09T13:57:22.837-0400 I COMMAND [repl writer worker 12] CMD: drop db25.collmod_separate_collections_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 jstests/concurrency/fsm_workloads/collmod_separate_collections.js: Workload completed in 4733 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 m30999| 2015-07-09T13:57:22.839-0400 I COMMAND [conn1] DROP: db25.coll25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.839-0400 m30999| 2015-07-09T13:57:22.839-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:22.839-0400-559eb602ca4787b9985d1c8b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464642839), what: "dropCollection.start", ns: "db25.coll25", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.842-0400 m31102| 2015-07-09T13:57:22.842-0400 I COMMAND [repl writer worker 14] CMD: drop db25.collmod_separate_collections_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.846-0400 m31101| 2015-07-09T13:57:22.845-0400 I COMMAND [repl writer worker 10] CMD: drop db25.collmod_separate_collections_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.847-0400 m31101| 2015-07-09T13:57:22.847-0400 I COMMAND [repl writer worker 7] CMD: drop db25.collmod_separate_collections_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.896-0400 m30999| 2015-07-09T13:57:22.896-0400 I SHARDING [conn1] distributed lock 'db25.coll25/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb602ca4787b9985d1c8c [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.897-0400 m31100| 2015-07-09T13:57:22.897-0400 I COMMAND [conn15] CMD: drop db25.coll25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.899-0400 m31200| 2015-07-09T13:57:22.899-0400 I COMMAND [conn64] CMD: drop db25.coll25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.908-0400 m31101| 2015-07-09T13:57:22.908-0400 I COMMAND [repl writer worker 4] CMD: drop db25.coll25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.909-0400 m31102| 2015-07-09T13:57:22.908-0400 I COMMAND [repl writer worker 15] CMD: drop db25.coll25 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.953-0400 m31100| 2015-07-09T13:57:22.952-0400 I SHARDING [conn15] remotely refreshing metadata for db25.coll25 with requested shard version 0|0||000000000000000000000000, current shard version is 1|0||559eb5fdca4787b9985d1c89, current metadata version is 1|0||559eb5fdca4787b9985d1c89 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.954-0400 m31100| 2015-07-09T13:57:22.954-0400 W SHARDING [conn15] no chunks found when reloading db25.coll25, previous version was 0|0||559eb5fdca4787b9985d1c89, this 
is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.954-0400 m31100| 2015-07-09T13:57:22.954-0400 I SHARDING [conn15] dropping metadata for db25.coll25 at shard version 1|0||559eb5fdca4787b9985d1c89, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:22.956-0400 m30999| 2015-07-09T13:57:22.956-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:22.956-0400-559eb602ca4787b9985d1c8d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464642956), what: "dropCollection", ns: "db25.coll25", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.010-0400 m30999| 2015-07-09T13:57:23.010-0400 I SHARDING [conn1] distributed lock 'db25.coll25/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.066-0400 m30999| 2015-07-09T13:57:23.066-0400 I COMMAND [conn1] DROP DATABASE: db25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.066-0400 m30999| 2015-07-09T13:57:23.066-0400 I SHARDING [conn1] DBConfig::dropDatabase: db25
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.067-0400 m30999| 2015-07-09T13:57:23.066-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:23.066-0400-559eb603ca4787b9985d1c8e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464643066), what: "dropDatabase.start", ns: "db25", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.172-0400 m30999| 2015-07-09T13:57:23.172-0400 I SHARDING [conn1] DBConfig::dropDatabase: db25 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.173-0400 m31100| 2015-07-09T13:57:23.172-0400 I COMMAND [conn28] dropDatabase db25 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.173-0400 m31100| 2015-07-09T13:57:23.172-0400 I COMMAND [conn28] dropDatabase db25 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.173-0400 m30999| 2015-07-09T13:57:23.173-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:23.173-0400-559eb603ca4787b9985d1c8f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464643173), what: "dropDatabase", ns: "db25", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.174-0400 m31102| 2015-07-09T13:57:23.173-0400 I COMMAND [repl writer worker 5] dropDatabase db25 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.174-0400 m31102| 2015-07-09T13:57:23.173-0400 I COMMAND [repl writer worker 5] dropDatabase db25 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.174-0400 m31101| 2015-07-09T13:57:23.174-0400 I COMMAND [repl writer worker 14] dropDatabase db25 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.175-0400 m31101| 2015-07-09T13:57:23.174-0400 I COMMAND [repl writer worker 14] dropDatabase db25 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.258-0400 m31100| 2015-07-09T13:57:23.258-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.264-0400 m31102| 2015-07-09T13:57:23.264-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.266-0400 m31101| 2015-07-09T13:57:23.266-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.310-0400 m31200| 2015-07-09T13:57:23.309-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.312-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.313-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.313-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.313-0400 jstests/concurrency/fsm_workloads/reindex_background.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.313-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.313-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.313-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.323-0400 m30999| 2015-07-09T13:57:23.322-0400 I SHARDING [conn1] distributed lock 'db26/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb603ca4787b9985d1c90
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.323-0400 m31201| 2015-07-09T13:57:23.323-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.325-0400 m31202| 2015-07-09T13:57:23.325-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.329-0400 m30999| 2015-07-09T13:57:23.329-0400 I SHARDING [conn1] Placing [db26] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.330-0400 m30999| 2015-07-09T13:57:23.329-0400 I SHARDING [conn1] Enabling sharding for database [db26] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.385-0400 m30999| 2015-07-09T13:57:23.384-0400 I SHARDING [conn1] distributed lock 'db26/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.415-0400 m31100| 2015-07-09T13:57:23.415-0400 I INDEX [conn68] build index on: db26.coll26 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db26.coll26" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.416-0400 m31100| 2015-07-09T13:57:23.415-0400 I INDEX [conn68] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.423-0400 m31100| 2015-07-09T13:57:23.423-0400 I INDEX [conn68] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.425-0400 m30999| 2015-07-09T13:57:23.425-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db26.coll26", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.433-0400 m30999| 2015-07-09T13:57:23.433-0400 I SHARDING [conn1] distributed lock 'db26.coll26/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb603ca4787b9985d1c91
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.434-0400 m30999| 2015-07-09T13:57:23.433-0400 I SHARDING [conn1] enable sharding on: db26.coll26 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.434-0400 m30999| 2015-07-09T13:57:23.433-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:23.433-0400-559eb603ca4787b9985d1c92", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464643433), what: "shardCollection.start", ns: "db26.coll26", details: { shardKey: { _id: "hashed" }, collection: "db26.coll26", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.445-0400 m31101| 2015-07-09T13:57:23.444-0400 I INDEX [repl writer worker 8] build index on: db26.coll26 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db26.coll26" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.445-0400 m31101| 2015-07-09T13:57:23.444-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.446-0400 m31102| 2015-07-09T13:57:23.444-0400 I INDEX [repl writer worker 10] build index on: db26.coll26 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db26.coll26" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.446-0400 m31102| 2015-07-09T13:57:23.444-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.454-0400 m31102| 2015-07-09T13:57:23.453-0400 I INDEX [repl writer worker 10] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.456-0400 m31101| 2015-07-09T13:57:23.456-0400 I INDEX [repl writer worker 8] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.487-0400 m30999| 2015-07-09T13:57:23.486-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db26.coll26 using new epoch 559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.595-0400 m30999| 2015-07-09T13:57:23.595-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db26.coll26: 1ms sequenceNumber: 117 version: 1|1||559eb603ca4787b9985d1c93 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.653-0400 m30999| 2015-07-09T13:57:23.652-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db26.coll26: 1ms sequenceNumber: 118 version: 1|1||559eb603ca4787b9985d1c93 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.655-0400 m31100| 2015-07-09T13:57:23.654-0400 I SHARDING [conn52] remotely refreshing metadata for db26.coll26 with requested shard version 1|1||559eb603ca4787b9985d1c93, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.656-0400 m31100| 2015-07-09T13:57:23.656-0400 I SHARDING [conn52] collection db26.coll26 was previously unsharded, new metadata loaded with shard version 1|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.657-0400 m31100| 2015-07-09T13:57:23.656-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb603ca4787b9985d1c93, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.657-0400 m30999| 2015-07-09T13:57:23.656-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:23.656-0400-559eb603ca4787b9985d1c94", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464643656), what: "shardCollection", ns: "db26.coll26", details: { version: "1|1||559eb603ca4787b9985d1c93" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.711-0400 m30999| 2015-07-09T13:57:23.711-0400 I SHARDING [conn1] distributed lock 'db26.coll26/bs-osx108-8:30999:1436464534:16807' unlocked.
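At this point the harness has created db26, made test-rs0 its primary shard, and sharded db26.coll26 on a hashed _id key (the numChunks: 2 in the shardCollection.start event is the initial chunk count). A minimal shell sketch of the equivalent setup, assuming a connection to one of the mongos instances; the sh.* helpers are the standard shell wrappers for the enableSharding/shardCollection commands logged above, not the harness's literal code:

// Sketch only: reproduces the sharding setup recorded in the log above.
// Names (db26, coll26) are taken from the log; the helpers are standard shell API.
sh.enableSharding("db26");                            // "Enabling sharding for database [db26] in config db"
sh.shardCollection("db26.coll26", { _id: "hashed" }); // builds _id_hashed and creates the initial 2 chunks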
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.712-0400 m30999| 2015-07-09T13:57:23.712-0400 I SHARDING [conn1] moving chunk ns: db26.coll26 moving ( ns: db26.coll26, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.713-0400 m31100| 2015-07-09T13:57:23.712-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.714-0400 m31100| 2015-07-09T13:57:23.714-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb603ca4787b9985d1c93') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.718-0400 m31100| 2015-07-09T13:57:23.718-0400 I SHARDING [conn15] distributed lock 'db26.coll26/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb603792e00bb6727493e
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.719-0400 m31100| 2015-07-09T13:57:23.718-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:23.718-0400-559eb603792e00bb6727493f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464643718), what: "moveChunk.start", ns: "db26.coll26", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.772-0400 m31100| 2015-07-09T13:57:23.771-0400 I SHARDING [conn15] remotely refreshing metadata for db26.coll26 based on current shard version 1|1||559eb603ca4787b9985d1c93, current metadata version is 1|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.774-0400 m31100| 2015-07-09T13:57:23.773-0400 I SHARDING [conn15] metadata of collection db26.coll26 already up to date (shard version : 1|1||559eb603ca4787b9985d1c93, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.774-0400 m31100| 2015-07-09T13:57:23.773-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.774-0400 m31100| 2015-07-09T13:57:23.774-0400 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.775-0400 m31200| 2015-07-09T13:57:23.774-0400 I SHARDING [conn16] remotely refreshing metadata for db26.coll26, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.777-0400 m31200| 2015-07-09T13:57:23.776-0400 I SHARDING [conn16] collection db26.coll26 was previously unsharded, new metadata loaded with shard version 0|0||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.777-0400 m31200| 2015-07-09T13:57:23.776-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb603ca4787b9985d1c93, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.777-0400 m31200| 2015-07-09T13:57:23.777-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db26.coll26 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.779-0400 m31100| 2015-07-09T13:57:23.779-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.783-0400 m31100| 2015-07-09T13:57:23.782-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.792-0400 m31100| 2015-07-09T13:57:23.791-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.801-0400 m31200| 2015-07-09T13:57:23.800-0400 I INDEX [migrateThread] build index on: db26.coll26 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.coll26" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.802-0400 m31200| 2015-07-09T13:57:23.800-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.802-0400 m31100| 2015-07-09T13:57:23.801-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.813-0400 m31200| 2015-07-09T13:57:23.812-0400 I INDEX [migrateThread] build index on: db26.coll26 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db26.coll26" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.813-0400 m31200| 2015-07-09T13:57:23.812-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.820-0400 m31100| 2015-07-09T13:57:23.819-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.825-0400 m31200| 2015-07-09T13:57:23.825-0400 I INDEX [migrateThread] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.827-0400 m31200| 2015-07-09T13:57:23.827-0400 I SHARDING [migrateThread] Deleter starting delete for: db26.coll26 from { _id: 0 } -> { _id: MaxKey }, with opId: 42003
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.828-0400 m31200| 2015-07-09T13:57:23.828-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db26.coll26 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.842-0400 m31202| 2015-07-09T13:57:23.842-0400 I INDEX [repl writer worker 1] build index on: db26.coll26 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db26.coll26" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.843-0400 m31202| 2015-07-09T13:57:23.842-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.853-0400 m31201| 2015-07-09T13:57:23.852-0400 I INDEX [repl writer worker 4] build index on: db26.coll26 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db26.coll26" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.853-0400 m31201| 2015-07-09T13:57:23.852-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.854-0400 m31100| 2015-07-09T13:57:23.853-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.855-0400 m31202| 2015-07-09T13:57:23.855-0400 I INDEX [repl writer worker 1] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.857-0400 m31201| 2015-07-09T13:57:23.856-0400 I INDEX [repl writer worker 4] build index done.  scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.857-0400 m31200| 2015-07-09T13:57:23.857-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.857-0400 m31200| 2015-07-09T13:57:23.857-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db26.coll26' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.919-0400 m31100| 2015-07-09T13:57:23.919-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.920-0400 m31100| 2015-07-09T13:57:23.919-0400 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.920-0400 m31100| 2015-07-09T13:57:23.920-0400 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.920-0400 m31100| 2015-07-09T13:57:23.920-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.928-0400 m31200| 2015-07-09T13:57:23.927-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db26.coll26' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.929-0400 m31200| 2015-07-09T13:57:23.928-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:23.928-0400-559eb603d5a107a5b9c0db1c", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464643928), what: "moveChunk.to", ns: "db26.coll26", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 49, step 2 of 5: 29, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 70, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.982-0400 m31100| 2015-07-09T13:57:23.981-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.982-0400 m31100| 2015-07-09T13:57:23.981-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb603ca4787b9985d1c93 through { _id: MinKey } -> { _id: 0 } for collection 'db26.coll26'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:23.983-0400 m31100| 2015-07-09T13:57:23.983-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:23.983-0400-559eb603792e00bb67274940", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464643983), what: "moveChunk.commit", ns: "db26.coll26", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.037-0400 m31100| 2015-07-09T13:57:24.037-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
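The block above is the donor side of a chunk migration: mongos asks test-rs0 to move the { _id: 0 } -> { _id: MaxKey } chunk to test-rs1, the recipient clones the (here empty) range and flushes it to its secondaries, and the donor bumps the shard version to 2|0 inside the critical section. A hedged sketch of the admin command that triggers such a move; values are copied from the logged request, and _waitForDelete mirrors the "waiting for full cleanup after move" line:

// Sketch only: manual equivalent of the moveChunk request in the log, via mongos.
db.adminCommand({
    moveChunk: "db26.coll26",
    find: { _id: 0 },      // any point inside the chunk being moved
    to: "test-rs1",
    _waitForDelete: true   // donor deletes the moved range before returning
});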
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.038-0400 m31100| 2015-07-09T13:57:24.037-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.038-0400 m31100| 2015-07-09T13:57:24.037-0400 I SHARDING [conn15] Deleter starting delete for: db26.coll26 from { _id: 0 } -> { _id: MaxKey }, with opId: 30158
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.038-0400 m31100| 2015-07-09T13:57:24.037-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db26.coll26 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.038-0400 m31100| 2015-07-09T13:57:24.037-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.039-0400 m31100| 2015-07-09T13:57:24.039-0400 I SHARDING [conn15] distributed lock 'db26.coll26/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.040-0400 m31100| 2015-07-09T13:57:24.039-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:24.039-0400-559eb604792e00bb67274941", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464644039), what: "moveChunk.from", ns: "db26.coll26", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 3, step 4 of 6: 142, step 5 of 6: 118, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.095-0400 m31100| 2015-07-09T13:57:24.093-0400 I COMMAND [conn15] command db26.coll26 command: moveChunk { moveChunk: "db26.coll26", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb603ca4787b9985d1c93') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 380ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.097-0400 m30999| 2015-07-09T13:57:24.096-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db26.coll26: 1ms sequenceNumber: 119 version: 2|1||559eb603ca4787b9985d1c93 based on: 1|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.098-0400 m31100| 2015-07-09T13:57:24.098-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db26.coll26", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb603ca4787b9985d1c93') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.104-0400 m31100| 2015-07-09T13:57:24.103-0400 I SHARDING [conn15] distributed lock 'db26.coll26/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb604792e00bb67274942
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.104-0400 m31100| 2015-07-09T13:57:24.103-0400 I SHARDING [conn15] remotely refreshing metadata for db26.coll26 based on current shard version 2|0||559eb603ca4787b9985d1c93, current metadata version is 2|0||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.106-0400 m31100| 2015-07-09T13:57:24.105-0400 I SHARDING [conn15] updating metadata for db26.coll26 from shard version 2|0||559eb603ca4787b9985d1c93 to shard version 2|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.106-0400 m31100| 2015-07-09T13:57:24.105-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb603ca4787b9985d1c93, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.106-0400 m31100| 2015-07-09T13:57:24.105-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.108-0400 m31100| 2015-07-09T13:57:24.107-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:24.107-0400-559eb604792e00bb67274943", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436464644107), what: "split", ns: "db26.coll26", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb603ca4787b9985d1c93') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb603ca4787b9985d1c93') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.163-0400 m31100| 2015-07-09T13:57:24.162-0400 I SHARDING [conn15] distributed lock 'db26.coll26/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.164-0400 m30999| 2015-07-09T13:57:24.164-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db26.coll26: 0ms sequenceNumber: 120 version: 2|3||559eb603ca4787b9985d1c93 based on: 2|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.165-0400 m31200| 2015-07-09T13:57:24.165-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db26.coll26", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb603ca4787b9985d1c93') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.169-0400 m31200| 2015-07-09T13:57:24.169-0400 I SHARDING [conn64] distributed lock 'db26.coll26/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb604d5a107a5b9c0db1d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.169-0400 m31200| 2015-07-09T13:57:24.169-0400 I SHARDING [conn64] remotely refreshing metadata for db26.coll26 based on current shard version 0|0||559eb603ca4787b9985d1c93, current metadata version is 1|1||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.171-0400 m31200| 2015-07-09T13:57:24.171-0400 I SHARDING [conn64] updating metadata for db26.coll26 from shard version 0|0||559eb603ca4787b9985d1c93 to shard version 2|0||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.172-0400 m31200| 2015-07-09T13:57:24.171-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559eb603ca4787b9985d1c93, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.172-0400 m31200| 2015-07-09T13:57:24.171-0400 I SHARDING [conn64] splitChunk accepted at version 2|0||559eb603ca4787b9985d1c93
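After the move, each shard's remaining chunk is split once. The split keys ±4611686018427387902 sit at roughly ±2^62, the midpoints of the two halves of the signed 64-bit hashed-key space, leaving four evenly sized chunks, two per shard. A sketch of the equivalent manual splits (NumberLong is needed for these values in the shell; the exact keys are copied from the logged splitChunk requests):

// Sketch only: manual equivalents of the logged splitChunk requests, via mongos.
db.adminCommand({ split: "db26.coll26", middle: { _id: NumberLong("-4611686018427387902") } });
db.adminCommand({ split: "db26.coll26", middle: { _id: NumberLong("4611686018427387902") } });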
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.174-0400 m31200| 2015-07-09T13:57:24.173-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:24.173-0400-559eb604d5a107a5b9c0db1e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436464644173), what: "split", ns: "db26.coll26", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb603ca4787b9985d1c93') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb603ca4787b9985d1c93') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.229-0400 m31200| 2015-07-09T13:57:24.228-0400 I SHARDING [conn64] distributed lock 'db26.coll26/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.231-0400 m30999| 2015-07-09T13:57:24.231-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db26.coll26: 1ms sequenceNumber: 121 version: 2|5||559eb603ca4787b9985d1c93 based on: 2|3||559eb603ca4787b9985d1c93
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.233-0400 Using 15 threads (requested 15)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.392-0400 m30998| 2015-07-09T13:57:24.392-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63182 #159 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.395-0400 m30999| 2015-07-09T13:57:24.395-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63184 #160 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.403-0400 m30998| 2015-07-09T13:57:24.402-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63183 #160 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.410-0400 m30999| 2015-07-09T13:57:24.409-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63185 #161 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.429-0400 m30998| 2015-07-09T13:57:24.429-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63186 #161 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.429-0400 m30999| 2015-07-09T13:57:24.429-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63188 #162 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.430-0400 m30998| 2015-07-09T13:57:24.429-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63187 #162 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.430-0400 m30999| 2015-07-09T13:57:24.430-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63189 #163 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.438-0400 m30998| 2015-07-09T13:57:24.437-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63190 #163 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.438-0400 m30999| 2015-07-09T13:57:24.438-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63191 #164 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.439-0400 m30999| 2015-07-09T13:57:24.438-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63192 #165 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.439-0400 m30999| 2015-07-09T13:57:24.439-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63193 #166 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.446-0400 m30998| 2015-07-09T13:57:24.445-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63194 #164 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.449-0400 m30998| 2015-07-09T13:57:24.448-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63195 #165 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.449-0400 m30998| 2015-07-09T13:57:24.449-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63196 #166 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.460-0400 setting random seed: 2883862308226
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.460-0400 setting random seed: 6411195998080
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.460-0400 setting random seed: 2971189906820
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.461-0400 setting random seed: 6090270923450
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.465-0400 setting random seed: 3128717066720
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.467-0400 setting random seed: 6799351596273
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.469-0400 setting random seed: 9183458094485
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.479-0400 setting random seed: 4347764849662
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.481-0400 setting random seed: 9073836929164
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.484-0400 setting random seed: 1444445727393
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.496-0400 setting random seed: 6702331779524
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.499-0400 setting random seed: 3191908798180
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.516-0400 setting random seed: 6380265881307
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.521-0400 setting random seed: 6788787185214
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.529-0400 setting random seed: 9146275026723
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.598-0400 m30998| 2015-07-09T13:57:24.593-0400 I SHARDING [conn162] ChunkManager: time to load chunks for db26.coll26: 0ms sequenceNumber: 30 version: 2|5||559eb603ca4787b9985d1c93 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.740-0400 m31100| 2015-07-09T13:57:24.738-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63197 #133 (77 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.755-0400 m31100| 2015-07-09T13:57:24.754-0400 I WRITE [conn26] insert db26.reindex_background_8 query: { _id: ObjectId('559eb604eac5440bf8d25a90'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 29795, W: 56885 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 120ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.782-0400 m31100| 2015-07-09T13:57:24.781-0400 I WRITE [conn67] insert db26.reindex_background_2 query: { _id: ObjectId('559eb604eac5440bf8d26ca2'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 78114, W: 2471 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 104ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.815-0400 m31100| 2015-07-09T13:57:24.814-0400 I WRITE [conn16] insert db26.reindex_background_11 query: { _id: ObjectId('559eb604eac5440bf8d27605'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 70777, W: 28544 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 130ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.840-0400 m31100| 2015-07-09T13:57:24.839-0400 I WRITE [conn68] insert db26.reindex_background_9 query: { _id: ObjectId('559eb604eac5440bf8d26407'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 72715, W: 59854 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 157ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.878-0400 m31100| 2015-07-09T13:57:24.877-0400 I WRITE [conn31] insert db26.reindex_background_6 query: { _id: ObjectId('559eb604eac5440bf8d27398'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 69609, W: 84202 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 191ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.909-0400 m31100| 2015-07-09T13:57:24.907-0400 I WRITE [conn25] insert db26.reindex_background_10 query: { _id: ObjectId('559eb604eac5440bf8d25716'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 69175, W: 121353 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 221ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.926-0400 m31100| 2015-07-09T13:57:24.924-0400 I WRITE [conn22] insert db26.reindex_background_7 query: { _id: ObjectId('559eb604eac5440bf8d2597e'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 67362, W: 152283 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 236ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.953-0400 m31100| 2015-07-09T13:57:24.951-0400 I WRITE [conn70] insert db26.reindex_background_1 query: { _id: ObjectId('559eb604eac5440bf8d25070'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 50817, W: 169635 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 246ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:24.992-0400 m31100| 2015-07-09T13:57:24.991-0400 I WRITE [conn66] insert db26.reindex_background_0 query: { _id: ObjectId('559eb604eac5440bf8d2748a'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 51025, W: 195402 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 287ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.040-0400 m31100| 2015-07-09T13:57:25.039-0400 I WRITE [conn23] insert db26.reindex_background_5 query: { _id: ObjectId('559eb604eac5440bf8d27f50'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 35340, W: 236447 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 318ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.095-0400 m31100| 2015-07-09T13:57:25.094-0400 I WRITE [conn29] insert db26.reindex_background_13 query: { _id: ObjectId('559eb604eac5440bf8d261a3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 44754, W: 284355 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 384ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.129-0400 m31100| 2015-07-09T13:57:25.128-0400 I WRITE [conn30] insert db26.reindex_background_3 query: { _id: ObjectId('559eb604eac5440bf8d2541c'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 55289, W: 338795 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 427ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.171-0400 m31100| 2015-07-09T13:57:25.170-0400 I WRITE [conn133] insert db26.reindex_background_12 query: { _id: ObjectId('559eb604eac5440bf8d28338'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 11009, W: 371972 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 422ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.173-0400 m31100| 2015-07-09T13:57:25.171-0400 I WRITE [conn67] insert db26.reindex_background_2 query: { _id: ObjectId('559eb604eac5440bf8d26dcb'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 467148, W: 2471 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 390ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.174-0400 m31100| 2015-07-09T13:57:25.172-0400 I WRITE [conn26] insert db26.reindex_background_8 query: { _id: ObjectId('559eb604eac5440bf8d25aa3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -23.0, -23.0 ] }, integer: 2.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 8, w: 8 } }, Database: { acquireCount: { w: 7, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 443131, W: 56885 } }, Collection: { acquireCount: { w: 3, W: 1 } }, Metadata: { acquireCount: { w: 4 } }, oplog: { acquireCount: { w: 4 } } } 387ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.175-0400 m31100| 2015-07-09T13:57:25.172-0400 I WRITE [conn16] insert db26.reindex_background_11 query: { _id: ObjectId('559eb604eac5440bf8d27607'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 426492, W: 28544 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 357ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.175-0400 m31100| 2015-07-09T13:57:25.172-0400 I WRITE [conn31] insert db26.reindex_background_6 query: { _id: ObjectId('559eb604eac5440bf8d27399'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 363262, W: 84202 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 295ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.176-0400 m31100| 2015-07-09T13:57:25.173-0400 I WRITE [conn23] insert db26.reindex_background_5 query: { _id: ObjectId('559eb604eac5440bf8d27f51'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 167414, W: 236447 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 134ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.180-0400 m31100| 2015-07-09T13:57:25.177-0400 I WRITE [conn27] insert db26.reindex_background_14 query: { _id: ObjectId('559eb604eac5440bf8d24c8d'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -20.0, -20.0 ] }, integer: 5.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 13, w: 13 } }, Database: { acquireCount: { w: 12, W: 1 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 483231 } }, Collection: { acquireCount: { w: 5, W: 1 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } 392ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.181-0400 m31100| 2015-07-09T13:57:25.177-0400 I WRITE [conn70] insert db26.reindex_background_1 query: { _id: ObjectId('559eb604eac5440bf8d25071'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 271741, W: 169635 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 226ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.181-0400 m31100| 2015-07-09T13:57:25.180-0400 I WRITE [conn22] insert db26.reindex_background_7 query: { _id: ObjectId('559eb604eac5440bf8d25985'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 319856, W: 152283 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 255ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.182-0400 m31100| 2015-07-09T13:57:25.180-0400 I WRITE [conn24] insert db26.reindex_background_4 query: { _id: ObjectId('559eb604eac5440bf8d26792'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -22.0, -22.0 ] }, integer: 3.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 10, w: 10 } }, Database: { acquireCount: { w: 9, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 459146, W: 23193 } }, Collection: { acquireCount: { w: 4, W: 1 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } 396ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.183-0400 m31100| 2015-07-09T13:57:25.180-0400 I WRITE [conn25] insert db26.reindex_background_10 query: { _id: ObjectId('559eb604eac5440bf8d2572b'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 333532, W: 121353 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 272ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.191-0400 m31100| 2015-07-09T13:57:25.181-0400 I WRITE [conn68] insert db26.reindex_background_9 query: { _id: ObjectId('559eb604eac5440bf8d2641b'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 413227, W: 59854 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 341ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:25.192-0400 m31100| 2015-07-09T13:57:25.181-0400 I WRITE [conn66] insert db26.reindex_background_0 query: { _id: ObjectId('559eb604eac5440bf8d27490'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 239328, W: 195402 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 188ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.421-0400 m31100| 2015-07-09T13:57:30.420-0400 I COMMAND [conn24] command db26.$cmd command: insert { insert: "reindex_background_4", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1226, w: 1226 } }, Database: { acquireCount: { w: 1225, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 459146, W: 23193 } }, Collection: { acquireCount: { w: 224, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 5759ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.463-0400 m31100| 2015-07-09T13:57:30.462-0400 I INDEX [conn46] build index on: db26.reindex_background_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_4", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.668-0400 m31100| 2015-07-09T13:57:30.667-0400 I COMMAND [conn23] command db26.$cmd command: insert { insert: "reindex_background_5", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1229, w: 1229 } }, Database: { acquireCount: { w: 1228, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 188868, W: 236447 } }, Collection: { acquireCount: { w: 227, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 5947ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.697-0400 m31100| 2015-07-09T13:57:30.696-0400 I INDEX [conn52] build index on: db26.reindex_background_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_5", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.709-0400 m31100| 2015-07-09T13:57:30.705-0400 I COMMAND [conn22] command db26.$cmd command: insert { insert: "reindex_background_7", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1228, w: 1228 } }, Database: { acquireCount: { w: 1227, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 362381, W: 152283 } }, Collection: { acquireCount: { w: 226, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6018ms
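The WRITE entries above are the 15 workload threads each bulk-loading 1000 documents into their own db26.reindex_background_N collection (the timeAcquiringMicros lock waits explain the multi-hundred-millisecond per-insert latencies), after which each thread kicks off a background text index build, logged as text_text. A hedged sketch of that apparent per-thread setup phase; the collection name, document shape, and index spec are copied from the log, while the loop structure, the shortened filler string, and the modulus keeping coordinates in a valid range are assumptions:

// Sketch only: per-thread setup resembling what the log records for reindex_background.js.
var coll = db.getSiblingDB("db26").reindex_background_0;   // one collection per thread
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 1000; ++i) {
    var n = i % 50;                                        // assumed: log only shows values counting up from -25
    bulk.insert({
        text: "Lorem ipsum dolor sit amet, ...",           // long filler string in the logged documents
        geo: { type: "Point", coordinates: [n - 25, n - 25] },
        integer: n
    });
}
bulk.execute();
coll.createIndex({ text: "text" }, { background: true }); // logged as name: "text_text"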
2015-07-09T13:57:30.722-0400 m31100| 2015-07-09T13:57:30.722-0400 I COMMAND [conn16] command db26.$cmd command: insert { insert: "reindex_background_11", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1226, w: 1226 } }, Database: { acquireCount: { w: 1225, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 472493, W: 28544 } }, Collection: { acquireCount: { w: 224, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6039ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.723-0400 m31100| 2015-07-09T13:57:30.722-0400 I COMMAND [conn68] command db26.$cmd command: insert { insert: "reindex_background_9", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1233, w: 1233 } }, Database: { acquireCount: { w: 1232, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 459628, W: 59854 } }, Collection: { acquireCount: { w: 231, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6041ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.734-0400 m31100| 2015-07-09T13:57:30.733-0400 I INDEX [conn20] build index on: db26.reindex_background_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_7", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.762-0400 m31100| 2015-07-09T13:57:30.761-0400 I INDEX [conn59] build index on: db26.reindex_background_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_9", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.766-0400 m31100| 2015-07-09T13:57:30.765-0400 I INDEX [conn60] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.789-0400 m31100| 2015-07-09T13:57:30.778-0400 I COMMAND [conn66] command db26.$cmd command: insert { insert: "reindex_background_0", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1232, w: 1232 } }, Database: { acquireCount: { w: 1231, W: 1 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 323851, W: 195402 } }, Collection: { acquireCount: { w: 230, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6074ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.813-0400 m31100| 2015-07-09T13:57:30.811-0400 I COMMAND [conn133] command db26.$cmd 
command: insert { insert: "reindex_background_12", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1231, w: 1231 } }, Database: { acquireCount: { w: 1230, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 87173, W: 371972 } }, Collection: { acquireCount: { w: 229, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6065ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.826-0400 m31100| 2015-07-09T13:57:30.825-0400 I INDEX [conn72] build index on: db26.reindex_background_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_0", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.830-0400 m31100| 2015-07-09T13:57:30.827-0400 I COMMAND [conn25] command db26.$cmd command: insert { insert: "reindex_background_10", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1234, w: 1234 } }, Database: { acquireCount: { w: 1233, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 433416, W: 121353 } }, Collection: { acquireCount: { w: 232, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6141ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.857-0400 m31100| 2015-07-09T13:57:30.856-0400 I INDEX [conn58] build index on: db26.reindex_background_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_12", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.885-0400 m31100| 2015-07-09T13:57:30.885-0400 I INDEX [conn50] build index on: db26.reindex_background_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_10", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.902-0400 m31100| 2015-07-09T13:57:30.901-0400 I COMMAND [conn29] command db26.$cmd command: insert { insert: "reindex_background_13", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1236, w: 1236 } }, Database: { acquireCount: { w: 1235, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 279353, W: 284355 } }, Collection: { acquireCount: { w: 234, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6191ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.936-0400 m31100| 2015-07-09T13:57:30.936-0400 I INDEX [conn56] build index on: db26.reindex_background_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: 
"db26.reindex_background_13", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:30.964-0400 m31100| 2015-07-09T13:57:30.963-0400 I COMMAND [conn31] command db26.$cmd command: insert { insert: "reindex_background_6", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1241, w: 1241 } }, Database: { acquireCount: { w: 1240, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 526444, W: 84202 } }, Collection: { acquireCount: { w: 239, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6278ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.012-0400 m31100| 2015-07-09T13:57:31.004-0400 I INDEX [conn48] build index on: db26.reindex_background_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_6", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.013-0400 m31100| 2015-07-09T13:57:31.012-0400 I COMMAND [conn26] command db26.$cmd command: insert { insert: "reindex_background_8", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1248, w: 1248 } }, Database: { acquireCount: { w: 1247, W: 1 }, acquireWaitCount: { w: 12, W: 1 }, timeAcquiringMicros: { w: 632190, W: 56885 } }, Collection: { acquireCount: { w: 246, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6379ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.020-0400 m31100| 2015-07-09T13:57:31.019-0400 I COMMAND [conn70] command db26.$cmd command: insert { insert: "reindex_background_1", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1238, w: 1238 } }, Database: { acquireCount: { w: 1237, W: 1 }, acquireWaitCount: { w: 11, W: 1 }, timeAcquiringMicros: { w: 463894, W: 169635 } }, Collection: { acquireCount: { w: 236, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6315ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.050-0400 m31100| 2015-07-09T13:57:31.049-0400 I INDEX [conn33] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.075-0400 m31100| 2015-07-09T13:57:31.074-0400 I INDEX [conn55] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", 
language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.144-0400 m31100| 2015-07-09T13:57:31.143-0400 I COMMAND [conn27] command db26.$cmd command: insert { insert: "reindex_background_14", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1245, w: 1245 } }, Database: { acquireCount: { w: 1244, W: 1 }, acquireWaitCount: { w: 14 }, timeAcquiringMicros: { w: 723047 } }, Collection: { acquireCount: { w: 243, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6546ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.161-0400 m31100| 2015-07-09T13:57:31.156-0400 I COMMAND [conn30] command db26.$cmd command: insert { insert: "reindex_background_3", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1248, w: 1248 } }, Database: { acquireCount: { w: 1247, W: 1 }, acquireWaitCount: { w: 13, W: 1 }, timeAcquiringMicros: { w: 329277, W: 338795 } }, Collection: { acquireCount: { w: 246, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6464ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.187-0400 m31100| 2015-07-09T13:57:31.186-0400 I INDEX [conn49] build index on: db26.reindex_background_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_14", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.192-0400 m31100| 2015-07-09T13:57:31.192-0400 I INDEX [conn57] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.208-0400 m31100| 2015-07-09T13:57:31.207-0400 I COMMAND [conn67] command db26.$cmd command: insert { insert: "reindex_background_2", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1247, w: 1247 } }, Database: { acquireCount: { w: 1246, W: 1 }, acquireWaitCount: { w: 14, W: 1 }, timeAcquiringMicros: { w: 701303, W: 2471 } }, Collection: { acquireCount: { w: 245, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 6539ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:31.243-0400 m31100| 2015-07-09T13:57:31.243-0400 I INDEX [conn54] build index on: db26.reindex_background_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_2", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.034-0400 
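Every "build index on" entry above is the server-side start of a background text index build; the { _fts: "text", _ftsx: 1 } key is the internal form MongoDB stores for a { text: "text" } specification. The shell-helper equivalent of what each workload thread requested, with db switched to db26 (an equivalent form, not the workload's literal code):

    // Background text index build as logged above; createIndex forwards
    // to the createIndexes command shown in the surrounding entries.
    assert.commandWorked(db.reindex_background_7.createIndex(
        { text: "text" },
        { name: "text_text", background: true }
    ));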
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.068-0400 m31100| 2015-07-09T13:57:32.055-0400 I COMMAND [conn46] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_4", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:64 reslen:173 locks:{ Global: { acquireCount: { r: 66, w: 66 } }, Database: { acquireCount: { w: 66, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 226941, W: 43735 } }, Collection: { acquireCount: { w: 65 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1630ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.085-0400 m31101| 2015-07-09T13:57:32.084-0400 I INDEX [repl index builder 75] build index on: db26.reindex_background_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_4", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.085-0400 m31102| 2015-07-09T13:57:32.085-0400 I INDEX [repl index builder 75] build index on: db26.reindex_background_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_4", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.095-0400 m31100| 2015-07-09T13:57:32.095-0400 I INDEX [conn46] build index on: db26.reindex_background_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_4", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.174-0400 m31100| 2015-07-09T13:57:32.173-0400 I INDEX [conn52] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.180-0400 m31100| 2015-07-09T13:57:32.179-0400 I COMMAND [conn52] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_5", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:59 reslen:173 locks:{ Global: { acquireCount: { r: 61, w: 61 } }, Database: { acquireCount: { w: 61, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 201127, W: 8551 } }, Collection: { acquireCount: { w: 60 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1507ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.190-0400 m31100| 2015-07-09T13:57:32.189-0400 I INDEX [conn60] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.207-0400 m31100| 2015-07-09T13:57:32.207-0400 I INDEX [conn59] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.215-0400 m31100| 2015-07-09T13:57:32.210-0400 I COMMAND [conn60] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_11", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:59 reslen:173 locks:{ Global: { acquireCount: { r: 61, w: 61 } }, Database: { acquireCount: { w: 61, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 184974, W: 55290 } }, Collection: { acquireCount: { w: 60 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1483ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.225-0400 m31100| 2015-07-09T13:57:32.223-0400 I INDEX [conn52] build index on: db26.reindex_background_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_5", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.225-0400 m31101| 2015-07-09T13:57:32.225-0400 I INDEX [repl index builder 76] build index on: db26.reindex_background_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_5", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.245-0400 m31100| 2015-07-09T13:57:32.244-0400 I COMMAND [conn59] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_9", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:58 reslen:173 locks:{ Global: { acquireCount: { r: 60, w: 60 } }, Database: { acquireCount: { w: 60, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 199282, W: 61142 } }, Collection: { acquireCount: { w: 59 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1518ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.248-0400 m31102| 2015-07-09T13:57:32.248-0400 I INDEX [repl index builder 76] build index on: db26.reindex_background_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_5", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.261-0400 m31100| 2015-07-09T13:57:32.261-0400 I INDEX [conn60] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.267-0400 m31100| 2015-07-09T13:57:32.267-0400 I INDEX [conn48] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.271-0400 m31100| 2015-07-09T13:57:32.271-0400 I INDEX [conn20] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.281-0400 m31100| 2015-07-09T13:57:32.280-0400 I INDEX [conn59] build index on: db26.reindex_background_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_9", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.285-0400 m31100| 2015-07-09T13:57:32.285-0400 I INDEX [conn72] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.291-0400 m31100| 2015-07-09T13:57:32.290-0400 I INDEX [conn50] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.291-0400 m31101| 2015-07-09T13:57:32.291-0400 I INDEX [repl index builder 77] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.292-0400 m31100| 2015-07-09T13:57:32.291-0400 I INDEX [conn58] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.293-0400 m31100| 2015-07-09T13:57:32.292-0400 I INDEX [conn56] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.297-0400 m31100| 2015-07-09T13:57:32.296-0400 I INDEX [conn55] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.308-0400 m31100| 2015-07-09T13:57:32.298-0400 I COMMAND [conn48] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_6", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:53 reslen:173 locks:{ Global: { acquireCount: { r: 55, w: 55 } }, Database: { acquireCount: { w: 55, W: 2 }, acquireWaitCount: { w: 9, W: 2 }, timeAcquiringMicros: { w: 138878, W: 52640 } }, Collection: { acquireCount: { w: 54 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1329ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.309-0400 m31100| 2015-07-09T13:57:32.299-0400 I COMMAND [conn20] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_7", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:60 reslen:173 locks:{ Global: { acquireCount: { r: 62, w: 62 } }, Database: { acquireCount: { w: 62, W: 2 }, acquireWaitCount: { w: 15, W: 2 }, timeAcquiringMicros: { w: 256976, W: 45915 } }, Collection: { acquireCount: { w: 61 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1590ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.329-0400 m31100| 2015-07-09T13:57:32.329-0400 I INDEX [conn33] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.330-0400 m31100| 2015-07-09T13:57:32.330-0400 I COMMAND [conn72] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_0", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:59 reslen:173 locks:{ Global: { acquireCount: { r: 61, w: 61 } }, Database: { acquireCount: { w: 61, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 216872, W: 65909 } }, Collection: { acquireCount: { w: 60 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1538ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.331-0400 m31100| 2015-07-09T13:57:32.330-0400 I COMMAND [conn50] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_10", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:57 reslen:173 locks:{ Global: { acquireCount: { r: 59, w: 59 } }, Database: { acquireCount: { w: 59, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 195487, W: 85546 } }, Collection: { acquireCount: { w: 58 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1498ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.333-0400 m31100| 2015-07-09T13:57:32.331-0400 I COMMAND [conn58] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_12", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:58 reslen:173 locks:{ Global: { acquireCount: { r: 60, w: 60 } }, Database: { acquireCount: { w: 60, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 214458, W: 68675 } }, Collection: { acquireCount: { w: 59 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1513ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.333-0400 m31102| 2015-07-09T13:57:32.333-0400 I INDEX [repl index builder 77] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.335-0400 m31100| 2015-07-09T13:57:32.332-0400 I COMMAND [conn56] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_13", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:55 reslen:173 locks:{ Global: { acquireCount: { r: 57, w: 57 } }, Database: { acquireCount: { w: 57, W: 2 }, acquireWaitCount: { w: 11, W: 2 }, timeAcquiringMicros: { w: 194195, W: 49511 } }, Collection: { acquireCount: { w: 56 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1418ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.335-0400 m31100| 2015-07-09T13:57:32.333-0400 I COMMAND [conn55] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_1", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:52 reslen:173 locks:{ Global: { acquireCount: { r: 54, w: 54 } }, Database: { acquireCount: { w: 54, W: 2 }, acquireWaitCount: { w: 8, W: 2 }, timeAcquiringMicros: { w: 97694, W: 72283 } }, Collection: { acquireCount: { w: 53 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1303ms
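Each createIndexes completion above reports numYields, how often the background build yielded its locks so other operations could interleave, plus per-resource lock statistics; the ~1.3-1.6s durations are dominated by timeAcquiringMicros on the database lock, since fifteen builds compete for it. The wire-level form of the logged command, issued directly against db26 (the reply fields named in the comment are the standard 3.x response shape):

    // Same command the log shows arriving over the wire.
    var res = db.runCommand({
        createIndexes: "reindex_background_1",
        indexes: [ { key: { text: "text" }, name: "text_text", background: true } ]
    });
    assert.commandWorked(res);
    // res.numIndexesBefore and res.numIndexesAfter report the index count
    // around the build; reslen:173 above is the serialized size of this reply.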
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.343-0400 m31100| 2015-07-09T13:57:32.343-0400 I INDEX [conn48] build index on: db26.reindex_background_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_6", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.363-0400 m31101| 2015-07-09T13:57:32.363-0400 I INDEX [repl index builder 78] build index on: db26.reindex_background_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_9", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.376-0400 m31100| 2015-07-09T13:57:32.376-0400 I INDEX [conn20] build index on: db26.reindex_background_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_7", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.378-0400 m31100| 2015-07-09T13:57:32.377-0400 I COMMAND [conn33] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_8", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:53 reslen:173 locks:{ Global: { acquireCount: { r: 55, w: 55 } }, Database: { acquireCount: { w: 55, W: 2 }, acquireWaitCount: { w: 10, W: 2 }, timeAcquiringMicros: { w: 147811, W: 66908 } }, Collection: { acquireCount: { w: 54 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1360ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.399-0400 m31100| 2015-07-09T13:57:32.398-0400 I INDEX [conn58] build index on: db26.reindex_background_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_10", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.400-0400 m31102| 2015-07-09T13:57:32.400-0400 I INDEX [repl index builder 78] build index on: db26.reindex_background_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_9", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.406-0400 m31100| 2015-07-09T13:57:32.406-0400 I INDEX [conn50] build index on: db26.reindex_background_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_0", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.410-0400 m31100| 2015-07-09T13:57:32.410-0400 I INDEX [conn55] build index on: db26.reindex_background_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_13", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.417-0400 m31100| 2015-07-09T13:57:32.417-0400 I INDEX [conn72] build index on: db26.reindex_background_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_12", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.423-0400 m31100| 2015-07-09T13:57:32.423-0400 I INDEX [conn56] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.429-0400 m31100| 2015-07-09T13:57:32.429-0400 I INDEX [conn49] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.435-0400 m31101| 2015-07-09T13:57:32.435-0400 I INDEX [repl index builder 79] build index on: db26.reindex_background_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_6", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.437-0400 m31100| 2015-07-09T13:57:32.437-0400 I INDEX [conn57] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.443-0400 m31100| 2015-07-09T13:57:32.443-0400 I INDEX [conn33] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.451-0400 m31100| 2015-07-09T13:57:32.451-0400 I INDEX [conn54] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.462-0400 m31100| 2015-07-09T13:57:32.461-0400 I COMMAND [conn49] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_14", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:51 reslen:173 locks:{ Global: { acquireCount: { r: 53, w: 53 } }, Database: { acquireCount: { w: 53, W: 2 }, acquireWaitCount: { w: 11, W: 2 }, timeAcquiringMicros: { w: 169414, W: 49041 } }, Collection: { acquireCount: { w: 52 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1306ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.463-0400 m31100| 2015-07-09T13:57:32.462-0400 I COMMAND [conn57] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_3", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:51 reslen:173 locks:{ Global: { acquireCount: { r: 53, w: 53 } }, Database: { acquireCount: { w: 53, W: 2 }, acquireWaitCount: { w: 10, W: 2 }, timeAcquiringMicros: { w: 174154, W: 50158 } }, Collection: { acquireCount: { w: 52 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1301ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.484-0400 m31102| 2015-07-09T13:57:32.484-0400 I INDEX [repl index builder 79] build index on: db26.reindex_background_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_6", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.498-0400 m31100| 2015-07-09T13:57:32.497-0400 I COMMAND [conn54] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_2", indexes: [ { key: { text: "text" }, name: "text_text", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:50 reslen:173 locks:{ Global: { acquireCount: { r: 52, w: 52 } }, Database: { acquireCount: { w: 52, W: 2 }, acquireWaitCount: { w: 10, W: 2 }, timeAcquiringMicros: { w: 163903, W: 60666 } }, Collection: { acquireCount: { w: 51 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1280ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.519-0400 m31100| 2015-07-09T13:57:32.518-0400 I INDEX [conn57] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.525-0400 m31101| 2015-07-09T13:57:32.524-0400 I INDEX [repl index builder 80] build index on: db26.reindex_background_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_7", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.528-0400 m31100| 2015-07-09T13:57:32.528-0400 I INDEX [conn49] build index on: db26.reindex_background_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_14", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.550-0400 m31100| 2015-07-09T13:57:32.549-0400 I INDEX [conn54] build index on: db26.reindex_background_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_2", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.555-0400 m31102| 2015-07-09T13:57:32.555-0400 I INDEX [repl index builder 80] build index on: db26.reindex_background_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_7", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.589-0400 m31101| 2015-07-09T13:57:32.588-0400 I INDEX [repl index builder 81] build index on: db26.reindex_background_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_0", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.620-0400 m31102| 2015-07-09T13:57:32.620-0400 I INDEX [repl index builder 81] build index on: db26.reindex_background_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_0", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.648-0400 m31102| 2015-07-09T13:57:32.648-0400 I INDEX [repl index builder 75] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.654-0400 m31101| 2015-07-09T13:57:32.653-0400 I INDEX [repl index builder 82] build index on: db26.reindex_background_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_10", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.685-0400 m31102| 2015-07-09T13:57:32.685-0400 I INDEX [repl index builder 82] build index on: db26.reindex_background_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_10", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.686-0400 m31101| 2015-07-09T13:57:32.686-0400 I INDEX [repl index builder 75] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.720-0400 m31101| 2015-07-09T13:57:32.719-0400 I INDEX [repl index builder 83] build index on: db26.reindex_background_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_12", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.747-0400 m31102| 2015-07-09T13:57:32.746-0400 I INDEX [repl index builder 83] build index on: db26.reindex_background_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_12", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.786-0400 m31101| 2015-07-09T13:57:32.785-0400 I INDEX [repl index builder 84] build index on: db26.reindex_background_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_13", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.816-0400 m31102| 2015-07-09T13:57:32.815-0400 I INDEX [repl index builder 84] build index on: db26.reindex_background_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_13", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.851-0400 m31101| 2015-07-09T13:57:32.850-0400 I INDEX [repl index builder 85] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.879-0400 m31102| 2015-07-09T13:57:32.879-0400 I INDEX [repl index builder 85] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.919-0400 m31101| 2015-07-09T13:57:32.918-0400 I INDEX [repl index builder 86] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.943-0400 m31102| 2015-07-09T13:57:32.942-0400 I INDEX [repl index builder 86] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:32.984-0400 m31101| 2015-07-09T13:57:32.984-0400 I INDEX [repl index builder 87] build index on: db26.reindex_background_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_14", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.011-0400 m31102| 2015-07-09T13:57:33.010-0400 I INDEX [repl index builder 87] build index on: db26.reindex_background_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_14", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.045-0400 m31101| 2015-07-09T13:57:33.045-0400 I INDEX [repl index builder 88] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.077-0400 m31102| 2015-07-09T13:57:33.077-0400 I INDEX [repl index builder 88] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.111-0400 m31101| 2015-07-09T13:57:33.110-0400 I INDEX [repl index builder 89] build index on: db26.reindex_background_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_2", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.140-0400 m31102| 2015-07-09T13:57:33.140-0400 I INDEX [repl index builder 89] build index on: db26.reindex_background_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_2", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.238-0400 m31102| 2015-07-09T13:57:33.238-0400 I INDEX [repl index builder 76] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.254-0400 m31101| 2015-07-09T13:57:33.254-0400 I INDEX [repl index builder 76] build index done. scanned 1000 total records. 1 secs
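The m31101 and m31102 entries above show the same builds replaying on the secondaries: the createIndexes write replicates through the oplog, and each secondary runs its own "repl index builder" thread per index. A sketch of how a test might confirm the builds reached every node; replTest stands in for whatever ReplSetTest handle the harness holds, so that name is an assumption:

    // After writes are acknowledged everywhere, every node should report
    // the text index by name.
    replTest.awaitReplication();
    replTest.nodes.forEach(function(node) {
        var names = node.getDB("db26").reindex_background_8.getIndexes()
                        .map(function(spec) { return spec.name; });
        assert.contains("text_text", names);
    });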
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.363-0400 m31100| 2015-07-09T13:57:33.363-0400 I INDEX [conn46] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.366-0400 m31101| 2015-07-09T13:57:33.366-0400 I INDEX [repl index builder 77] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.380-0400 m31100| 2015-07-09T13:57:33.371-0400 I COMMAND [conn46] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_4", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:52 reslen:173 locks:{ Global: { acquireCount: { r: 54, w: 54 } }, Database: { acquireCount: { w: 54, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 174526, W: 30184 } }, Collection: { acquireCount: { w: 53 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1303ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.401-0400 m31100| 2015-07-09T13:57:33.401-0400 I INDEX [conn46] build index on: db26.reindex_background_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_4", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.401-0400 m31102| 2015-07-09T13:57:33.401-0400 I INDEX [repl index builder 77] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.415-0400 m31101| 2015-07-09T13:57:33.415-0400 I INDEX [repl index builder 90] build index on: db26.reindex_background_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_4", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.423-0400 m31102| 2015-07-09T13:57:33.423-0400 I INDEX [repl index builder 90] build index on: db26.reindex_background_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_4", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.452-0400 m31100| 2015-07-09T13:57:33.452-0400 I INDEX [conn59] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.475-0400 m31100| 2015-07-09T13:57:33.475-0400 I COMMAND [conn59] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_9", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:48 reslen:173 locks:{ Global: { acquireCount: { r: 50, w: 50 } }, Database: { acquireCount: { w: 50, W: 2 }, acquireWaitCount: { w: 10, W: 2 }, timeAcquiringMicros: { w: 151935, W: 46086 } }, Collection: { acquireCount: { w: 49 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1229ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.498-0400 m31100| 2015-07-09T13:57:33.498-0400 I INDEX [conn59] build index on: db26.reindex_background_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_9", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.521-0400 m31102| 2015-07-09T13:57:33.521-0400 I INDEX [repl index builder 91] build index on: db26.reindex_background_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_9", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.524-0400 m31100| 2015-07-09T13:57:33.524-0400 I INDEX [conn52] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.524-0400 m31101| 2015-07-09T13:57:33.524-0400 I INDEX [repl index builder 91] build index on: db26.reindex_background_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_9", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.528-0400 m31100| 2015-07-09T13:57:33.528-0400 I INDEX [conn60] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.539-0400 m31101| 2015-07-09T13:57:33.539-0400 I INDEX [repl index builder 78] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.543-0400 m31100| 2015-07-09T13:57:33.543-0400 I COMMAND [conn52] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_5", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:53 reslen:173 locks:{ Global: { acquireCount: { r: 55, w: 55 } }, Database: { acquireCount: { w: 55, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 197095, W: 30249 } }, Collection: { acquireCount: { w: 54 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1344ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.544-0400 m31100| 2015-07-09T13:57:33.544-0400 I COMMAND [conn60] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_11", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:52 reslen:173 locks:{ Global: { acquireCount: { r: 54, w: 54 } }, Database: { acquireCount: { w: 54, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 168683, W: 40434 } }, Collection: { acquireCount: { w: 53 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1325ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.579-0400 m31100| 2015-07-09T13:57:33.579-0400 I INDEX [conn60] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.585-0400 m31102| 2015-07-09T13:57:33.585-0400 I INDEX [repl index builder 92] build index on: db26.reindex_background_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_5", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.590-0400 m31100| 2015-07-09T13:57:33.589-0400 I INDEX [conn52] build index on: db26.reindex_background_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_5", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.600-0400 m31102| 2015-07-09T13:57:33.599-0400 I INDEX [repl index builder 78] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.611-0400 m31101| 2015-07-09T13:57:33.610-0400 I INDEX [repl index builder 92] build index on: db26.reindex_background_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_5", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.616-0400 m31100| 2015-07-09T13:57:33.615-0400 I INDEX [conn58] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.637-0400 m31100| 2015-07-09T13:57:33.629-0400 I COMMAND [conn58] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_10", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:52 reslen:173 locks:{ Global: { acquireCount: { r: 54, w: 54 } }, Database: { acquireCount: { w: 54, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 128232, W: 52797 } }, Collection: { acquireCount: { w: 53 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1291ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.638-0400 m31101| 2015-07-09T13:57:33.638-0400 I INDEX [repl index builder 79] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.661-0400 m31102| 2015-07-09T13:57:33.660-0400 I INDEX [repl index builder 93] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.664-0400 m31100| 2015-07-09T13:57:33.664-0400 I INDEX [conn58] build index on: db26.reindex_background_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_10", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.667-0400 m31100| 2015-07-09T13:57:33.667-0400 I INDEX [conn72] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.671-0400 m31100| 2015-07-09T13:57:33.671-0400 I INDEX [conn50] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.673-0400 m31100| 2015-07-09T13:57:33.672-0400 I INDEX [conn48] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.677-0400 m31100| 2015-07-09T13:57:33.677-0400 I INDEX [conn20] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.678-0400 m31100| 2015-07-09T13:57:33.678-0400 I INDEX [conn55] build index done. scanned 1000 total records. 1 secs
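The pattern above repeats per collection: as soon as a text build finishes ("build index done. scanned 1000 total records."), the same connection starts the 2dsphere build, then a plain ascending index on integer. In shell-helper form (equivalent calls, not the workload's literal code); note that the { integer: 1.0 } key printed in the log is the same specification as { integer: 1 }, since shell numbers are doubles:

    // Second and third index waves for one collection, matching the logged names.
    assert.commandWorked(db.reindex_background_9.createIndex(
        { geo: "2dsphere" }, { name: "geo_2dsphere", background: true }));
    assert.commandWorked(db.reindex_background_9.createIndex(
        { integer: 1 }, { name: "integer_1", background: true }));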
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.683-0400 m31100| 2015-07-09T13:57:33.682-0400 I COMMAND [conn72] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_12", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:54 reslen:173 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 133346, W: 85946 } }, Collection: { acquireCount: { w: 55 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1342ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.686-0400 m31100| 2015-07-09T13:57:33.685-0400 I COMMAND [conn50] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_0", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:54 reslen:173 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 142364, W: 70783 } }, Collection: { acquireCount: { w: 55 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1345ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.686-0400 m31100| 2015-07-09T13:57:33.686-0400 I COMMAND [conn48] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_6", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:54 reslen:173 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 184798, W: 45830 } }, Collection: { acquireCount: { w: 55 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1385ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.695-0400 m31100| 2015-07-09T13:57:33.688-0400 I COMMAND [conn20] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_7", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:54 reslen:173 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 164276, W: 79580 } }, Collection: { acquireCount: { w: 55 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1385ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.696-0400 m31100| 2015-07-09T13:57:33.688-0400 I COMMAND [conn55] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_13", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:54 reslen:173 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 134245, W: 76864 } }, Collection: { acquireCount: { w: 55 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1348ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.696-0400 m31100| 2015-07-09T13:57:33.691-0400 I INDEX [conn56] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.696-0400 m31100| 2015-07-09T13:57:33.693-0400 I INDEX [conn33] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.721-0400 m31101| 2015-07-09T13:57:33.721-0400 I INDEX [repl index builder 93] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.725-0400 m31100| 2015-07-09T13:57:33.725-0400 I INDEX [conn72] build index on: db26.reindex_background_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_12", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.730-0400 m31100| 2015-07-09T13:57:33.729-0400 I INDEX [conn50] build index on: db26.reindex_background_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_0", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.734-0400 m31102| 2015-07-09T13:57:33.734-0400 I INDEX [repl index builder 94] build index on: db26.reindex_background_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_10", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.737-0400 m31100| 2015-07-09T13:57:33.737-0400 I INDEX [conn48] build index on: db26.reindex_background_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_6", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.744-0400 m31100| 2015-07-09T13:57:33.744-0400 I INDEX [conn20] build index on: db26.reindex_background_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_7", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.751-0400 m31100| 2015-07-09T13:57:33.750-0400 I COMMAND [conn56] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_1", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:54 reslen:173 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 134361, W: 129450 } }, Collection: { acquireCount: { w: 55 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1410ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.752-0400 m31100| 2015-07-09T13:57:33.752-0400 I COMMAND [conn33] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_8", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:53 reslen:173 locks:{ Global: { acquireCount: { r: 55, w: 55 } }, Database: { acquireCount: { w: 55, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 146131, W: 114197 } }, Collection: { acquireCount: { w: 54 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1369ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.758-0400 m31100| 2015-07-09T13:57:33.758-0400 I INDEX [conn55] build index on: db26.reindex_background_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_13", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.787-0400 m31100| 2015-07-09T13:57:33.787-0400 I INDEX [conn56] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.794-0400 m31100| 2015-07-09T13:57:33.793-0400 I INDEX [conn33] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.797-0400 m31101| 2015-07-09T13:57:33.796-0400 I INDEX [repl index builder 94] build index on: db26.reindex_background_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_10", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.798-0400 m31102| 2015-07-09T13:57:33.798-0400 I INDEX [repl index builder 79] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.819-0400 m31102| 2015-07-09T13:57:33.818-0400 I INDEX [repl index builder 95] build index on: db26.reindex_background_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_12", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.830-0400 m31100| 2015-07-09T13:57:33.829-0400 I INDEX [conn49] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.831-0400 m31101| 2015-07-09T13:57:33.831-0400 I INDEX [repl index builder 80] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.835-0400 m31100| 2015-07-09T13:57:33.834-0400 I COMMAND [conn49] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_14", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:54 reslen:173 locks:{ Global: { acquireCount: { r: 56, w: 56 } }, Database: { acquireCount: { w: 56, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 159556, W: 52015 } }, Collection: { acquireCount: { w: 55 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1363ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.861-0400 m31100| 2015-07-09T13:57:33.861-0400 I INDEX [conn49] build index on: db26.reindex_background_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_14", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.877-0400 m31101| 2015-07-09T13:57:33.877-0400 I INDEX [repl index builder 95] build index on: db26.reindex_background_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_12", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.881-0400 m31100| 2015-07-09T13:57:33.881-0400 I INDEX [conn54] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.883-0400 m31102| 2015-07-09T13:57:33.883-0400 I INDEX [repl index builder 96] build index on: db26.reindex_background_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_0", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.903-0400 m31100| 2015-07-09T13:57:33.903-0400 I COMMAND [conn54] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_2", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:56 reslen:173 locks:{ Global: { acquireCount: { r: 58, w: 58 } }, Database: { acquireCount: { w: 58, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 163788, W: 66035 } }, Collection: { acquireCount: { w: 57 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1404ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.906-0400 m31100| 2015-07-09T13:57:33.906-0400 I INDEX [conn57] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.938-0400 m31100| 2015-07-09T13:57:33.937-0400 I INDEX [conn54] build index on: db26.reindex_background_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_2", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.947-0400 m31100| 2015-07-09T13:57:33.942-0400 I COMMAND [conn57] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_3", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:58 reslen:173 locks:{ Global: { acquireCount: { r: 60, w: 60 } }, Database: { acquireCount: { w: 60, W: 2 }, acquireWaitCount: { w: 16, W: 2 }, timeAcquiringMicros: { w: 181381, W: 75236 } }, Collection: { acquireCount: { w: 59 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1471ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.960-0400 m31102| 2015-07-09T13:57:33.959-0400 I INDEX [repl index builder 97] build index on: db26.reindex_background_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_6", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.966-0400 m31101| 2015-07-09T13:57:33.965-0400 I INDEX [repl index builder 96] build index on: db26.reindex_background_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_0", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.969-0400 m31100| 2015-07-09T13:57:33.969-0400 I INDEX [conn57] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:33.986-0400 m31102| 2015-07-09T13:57:33.986-0400 I INDEX [repl index builder 80] build index done. scanned 1000 total records. 1 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.008-0400 m31101| 2015-07-09T13:57:34.008-0400 I INDEX [repl index builder 81] build index done. scanned 1000 total records. 1 secs
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.022-0400 m31102| 2015-07-09T13:57:34.021-0400 I INDEX [repl index builder 98] build index on: db26.reindex_background_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_7", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.029-0400 m31101| 2015-07-09T13:57:34.029-0400 I INDEX [repl index builder 97] build index on: db26.reindex_background_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_6", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.041-0400 m31102| 2015-07-09T13:57:34.041-0400 I INDEX [repl index builder 81] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.088-0400 m31102| 2015-07-09T13:57:34.088-0400 I INDEX [repl index builder 99] build index on: db26.reindex_background_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_13", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.099-0400 m31101| 2015-07-09T13:57:34.099-0400 I INDEX [repl index builder 98] build index on: db26.reindex_background_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_7", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.113-0400 m31101| 2015-07-09T13:57:34.112-0400 I INDEX [repl index builder 82] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.154-0400 m31102| 2015-07-09T13:57:34.154-0400 I INDEX [repl index builder 100] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.171-0400 m31101| 2015-07-09T13:57:34.171-0400 I INDEX [repl index builder 99] build index on: db26.reindex_background_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_13", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.177-0400 m31102| 2015-07-09T13:57:34.177-0400 I INDEX [repl index builder 82] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.201-0400 m31101| 2015-07-09T13:57:34.200-0400 I INDEX [repl index builder 83] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.217-0400 m31102| 2015-07-09T13:57:34.216-0400 I INDEX [repl index builder 101] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.225-0400 m31102| 2015-07-09T13:57:34.225-0400 I INDEX [repl index builder 83] build index done. scanned 1000 total records. 
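
The m31101 and m31102 lines interleaved here are the two secondaries replaying the same builds on their "repl index builder" threads, each finishing with its own "scanned 1000 total records" line. A sketch of one way to confirm an index has appeared on a secondary, assuming the harness's usual mapping of the m31101 prefix to port 31101 on the logged host bs-osx108-8:

    // Hypothetical spot-check from another shell; the host/port pairing is an assumption.
    var sec = new Mongo("bs-osx108-8:31101");
    sec.setSlaveOk();                      // permit reads on a secondary
    printjson(sec.getDB("db26").reindex_background_7.getIndexes());
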
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.239-0400 m31101| 2015-07-09T13:57:34.239-0400 I INDEX [repl index builder 100] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.281-0400 m31102| 2015-07-09T13:57:34.281-0400 I INDEX [repl index builder 102] build index on: db26.reindex_background_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_14", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.302-0400 m31101| 2015-07-09T13:57:34.301-0400 I INDEX [repl index builder 101] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.320-0400 m31101| 2015-07-09T13:57:34.319-0400 I INDEX [repl index builder 84] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.324-0400 m31102| 2015-07-09T13:57:34.324-0400 I INDEX [repl index builder 84] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.348-0400 m31102| 2015-07-09T13:57:34.348-0400 I INDEX [repl index builder 103] build index on: db26.reindex_background_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_2", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.369-0400 m31101| 2015-07-09T13:57:34.369-0400 I INDEX [repl index builder 102] build index on: db26.reindex_background_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_14", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.404-0400 m31102| 2015-07-09T13:57:34.403-0400 I INDEX [repl index builder 85] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.410-0400 m31102| 2015-07-09T13:57:34.410-0400 I INDEX [repl index builder 104] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.423-0400 m31101| 2015-07-09T13:57:34.423-0400 I INDEX [repl index builder 85] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.434-0400 m31101| 2015-07-09T13:57:34.434-0400 I INDEX [repl index builder 103] build index on: db26.reindex_background_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_2", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.451-0400 m31101| 2015-07-09T13:57:34.451-0400 I INDEX [repl index builder 86] build index done. scanned 1000 total records. 
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.475-0400 m30999| 2015-07-09T13:57:34.475-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:34.474-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.485-0400 m31102| 2015-07-09T13:57:34.485-0400 I INDEX [repl index builder 86] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.492-0400 m31100| 2015-07-09T13:57:34.492-0400 I INDEX [conn46] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.495-0400 m31101| 2015-07-09T13:57:34.495-0400 I INDEX [repl index builder 104] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.513-0400 m31100| 2015-07-09T13:57:34.505-0400 I COMMAND [conn46] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_4", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:44 reslen:173 locks:{ Global: { acquireCount: { r: 46, w: 46 } }, Database: { acquireCount: { w: 46, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 168978, W: 24841 } }, Collection: { acquireCount: { w: 45 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1125ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.513-0400 m31100| 2015-07-09T13:57:34.507-0400 I COMMAND [conn32] CMD: reIndex db26.reindex_background_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.538-0400 m31101| 2015-07-09T13:57:34.537-0400 I INDEX [repl index builder 87] build index done. scanned 1000 total records. 
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.542-0400 m31100| 2015-07-09T13:57:34.542-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.542-0400 m31100| 2015-07-09T13:57:34.542-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.546-0400 m31100| 2015-07-09T13:57:34.546-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_4", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.547-0400 m31100| 2015-07-09T13:57:34.546-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.550-0400 m31100| 2015-07-09T13:57:34.550-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_4", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.550-0400 m31100| 2015-07-09T13:57:34.550-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.551-0400 m31102| 2015-07-09T13:57:34.550-0400 I INDEX [repl index builder 87] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.553-0400 m31100| 2015-07-09T13:57:34.553-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_4", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.553-0400 m31100| 2015-07-09T13:57:34.553-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.567-0400 m31101| 2015-07-09T13:57:34.567-0400 I INDEX [repl index builder 105] build index on: db26.reindex_background_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_4", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.575-0400 m31102| 2015-07-09T13:57:34.574-0400 I INDEX [repl index builder 105] build index on: db26.reindex_background_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_4", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.588-0400 m31101| 2015-07-09T13:57:34.588-0400 I INDEX [repl index builder 89] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.600-0400 m31102| 2015-07-09T13:57:34.600-0400 I INDEX [repl index builder 88] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.604-0400 m31101| 2015-07-09T13:57:34.604-0400 I INDEX [repl index builder 88] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.635-0400 m31102| 2015-07-09T13:57:34.634-0400 I INDEX [repl index builder 89] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.664-0400 m31100| 2015-07-09T13:57:34.663-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.665-0400 m31100| 2015-07-09T13:57:34.664-0400 I COMMAND [conn32] command db26.reindex_background_4 command: reIndex { reIndex: "reindex_background_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 22639 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.665-0400 m31100| 2015-07-09T13:57:34.665-0400 I COMMAND [conn32] CMD: reIndex db26.reindex_background_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.694-0400 m31100| 2015-07-09T13:57:34.694-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.695-0400 m31100| 2015-07-09T13:57:34.694-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.699-0400 m31100| 2015-07-09T13:57:34.699-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_4", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.699-0400 m31100| 2015-07-09T13:57:34.699-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.704-0400 m31100| 2015-07-09T13:57:34.703-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_4", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.704-0400 m31100| 2015-07-09T13:57:34.703-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.707-0400 m31100| 2015-07-09T13:57:34.707-0400 I INDEX [conn32] build index on: db26.reindex_background_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_4", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.707-0400 m31100| 2015-07-09T13:57:34.707-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.709-0400 m31101| 2015-07-09T13:57:34.709-0400 I INDEX [repl index builder 90] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.710-0400 m31102| 2015-07-09T13:57:34.710-0400 I INDEX [repl index builder 90] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.797-0400 m31102| 2015-07-09T13:57:34.797-0400 I INDEX [repl index builder 91] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.818-0400 m31100| 2015-07-09T13:57:34.818-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
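
The conn32 sequence above is a full reIndex pass, issued twice back to back by the workload: per its locks section the command holds the database's exclusive W lock while it rebuilds every index on the collection with the bulk method, the _id_ index included. From the shell the whole pass is a single call:

    // reIndex as logged for conn32; the runCommand form is what appears in the log.
    db.getSiblingDB("db26").reindex_background_4.reIndex();
    // equivalent to: db.getSiblingDB("db26").runCommand({ reIndex: "reindex_background_4" })
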
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.819-0400 m31100| 2015-07-09T13:57:34.818-0400 I COMMAND [conn32] command db26.reindex_background_4 command: reIndex { reIndex: "reindex_background_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 21529 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.823-0400 m31100| 2015-07-09T13:57:34.823-0400 I INDEX [conn59] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.831-0400 m31101| 2015-07-09T13:57:34.831-0400 I INDEX [repl index builder 91] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.839-0400 m31102| 2015-07-09T13:57:34.839-0400 I INDEX [repl index builder 92] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.849-0400 m31100| 2015-07-09T13:57:34.840-0400 I COMMAND [conn59] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_9", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:43 reslen:173 locks:{ Global: { acquireCount: { r: 45, w: 45 } }, Database: { acquireCount: { w: 45, W: 2 }, acquireWaitCount: { w: 15, W: 2 }, timeAcquiringMicros: { w: 451860, W: 34843 } }, Collection: { acquireCount: { w: 44 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1364ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.858-0400 m31101| 2015-07-09T13:57:34.858-0400 I INDEX [repl index builder 92] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.901-0400 m31102| 2015-07-09T13:57:34.901-0400 I INDEX [repl index builder 106] build index on: db26.reindex_background_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_9", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.918-0400 m31100| 2015-07-09T13:57:34.917-0400 I INDEX [conn52] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.925-0400 m31101| 2015-07-09T13:57:34.924-0400 I INDEX [repl index builder 106] build index on: db26.reindex_background_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_9", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.926-0400 m31102| 2015-07-09T13:57:34.926-0400 I INDEX [repl index builder 94] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.939-0400 m31100| 2015-07-09T13:57:34.938-0400 I INDEX [conn20] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.939-0400 m31100| 2015-07-09T13:57:34.939-0400 I INDEX [conn58] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.942-0400 m31102| 2015-07-09T13:57:34.942-0400 I INDEX [repl index builder 93] build index done. scanned 1000 total records. 
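
Lock waits dominate these createIndexes timings: conn59 above spent about 0.45s of its 1364ms total (timeAcquiringMicros w: 451860) queued on the database lock while a concurrent reIndex held it exclusively. A hedged way to watch that contention from another shell, assuming the currentOp filter behaves on this server generation as documented:

    // List operations currently blocked on a lock (field name from currentOp output).
    db.currentOp({ waitingForLock: true });
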
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.945-0400 m31101| 2015-07-09T13:57:34.945-0400 I INDEX [repl index builder 93] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.945-0400 m31101| 2015-07-09T13:57:34.945-0400 I INDEX [repl index builder 94] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.955-0400 m31100| 2015-07-09T13:57:34.948-0400 I INDEX [conn50] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.955-0400 m31100| 2015-07-09T13:57:34.949-0400 I INDEX [conn60] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.956-0400 m31100| 2015-07-09T13:57:34.949-0400 I COMMAND [conn52] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_5", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:44 reslen:173 locks:{ Global: { acquireCount: { r: 46, w: 46 } }, Database: { acquireCount: { w: 46, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 406034, W: 57035 } }, Collection: { acquireCount: { w: 45 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1395ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.969-0400 m31100| 2015-07-09T13:57:34.968-0400 I COMMAND [conn20] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_7", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:40 reslen:173 locks:{ Global: { acquireCount: { r: 42, w: 42 } }, Database: { acquireCount: { w: 42, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 348512, W: 77745 } }, Collection: { acquireCount: { w: 41 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1279ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.972-0400 m31101| 2015-07-09T13:57:34.971-0400 I INDEX [repl index builder 95] build index done. scanned 1000 total records. 
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.972-0400 m31100| 2015-07-09T13:57:34.971-0400 I COMMAND [conn58] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_10", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:42 reslen:173 locks:{ Global: { acquireCount: { r: 44, w: 44 } }, Database: { acquireCount: { w: 44, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, timeAcquiringMicros: { w: 393339, W: 49281 } }, Collection: { acquireCount: { w: 43 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1334ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.973-0400 m31100| 2015-07-09T13:57:34.972-0400 I COMMAND [conn50] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_0", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:40 reslen:173 locks:{ Global: { acquireCount: { r: 42, w: 42 } }, Database: { acquireCount: { w: 42, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 377610, W: 62225 } }, Collection: { acquireCount: { w: 41 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1286ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.977-0400 m31100| 2015-07-09T13:57:34.974-0400 I COMMAND [conn60] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_11", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:45 reslen:173 locks:{ Global: { acquireCount: { r: 47, w: 47 } }, Database: { acquireCount: { w: 47, W: 2 }, acquireWaitCount: { w: 16, W: 2 }, timeAcquiringMicros: { w: 450277, W: 46114 } }, Collection: { acquireCount: { w: 46 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1423ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:34.980-0400 m31100| 2015-07-09T13:57:34.979-0400 I INDEX [conn55] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.000-0400 m31100| 2015-07-09T13:57:35.000-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63199 #134 (78 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.003-0400 m31102| 2015-07-09T13:57:35.002-0400 I INDEX [repl index builder 107] build index on: db26.reindex_background_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_5", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.004-0400 m31102| 2015-07-09T13:57:35.003-0400 I INDEX [repl index builder 95] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.004-0400 m31100| 2015-07-09T13:57:35.002-0400 I INDEX [conn72] build index done. scanned 1000 total records. 
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.004-0400 m31100| 2015-07-09T13:57:35.004-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63200 #135 (79 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.008-0400 m31101| 2015-07-09T13:57:35.007-0400 I INDEX [repl index builder 107] build index on: db26.reindex_background_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_5", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.009-0400 m31100| 2015-07-09T13:57:35.009-0400 I INDEX [conn49] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.010-0400 m31100| 2015-07-09T13:57:35.010-0400 I INDEX [conn33] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.024-0400 m31100| 2015-07-09T13:57:35.024-0400 I INDEX [conn48] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.063-0400 m31100| 2015-07-09T13:57:35.061-0400 I COMMAND [conn55] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_13", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:40 reslen:173 locks:{ Global: { acquireCount: { r: 42, w: 42 } }, Database: { acquireCount: { w: 42, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 402570, W: 139478 } }, Collection: { acquireCount: { w: 41 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1367ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.065-0400 m31100| 2015-07-09T13:57:35.065-0400 I COMMAND [conn72] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_12", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:42 reslen:173 locks:{ Global: { acquireCount: { r: 44, w: 44 } }, Database: { acquireCount: { w: 44, W: 2 }, acquireWaitCount: { w: 14, W: 2 }, timeAcquiringMicros: { w: 386513, W: 92120 } }, Collection: { acquireCount: { w: 43 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1381ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.067-0400 m31100| 2015-07-09T13:57:35.067-0400 I COMMAND [conn49] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_14", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:39 reslen:173 locks:{ Global: { acquireCount: { r: 41, w: 41 } }, Database: { acquireCount: { w: 41, W: 2 }, acquireWaitCount: { w: 11, W: 2 }, timeAcquiringMicros: { w: 322441, W: 76845 } }, Collection: { acquireCount: { w: 40 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1230ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.068-0400 m31100| 2015-07-09T13:57:35.067-0400 I COMMAND [conn33] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_8", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:42 reslen:173 locks:{ Global: { acquireCount: { r: 44, w: 44 } }, Database: { acquireCount: { w: 44, W: 2 }, acquireWaitCount: { w: 13, W: 2 }, 
timeAcquiringMicros: { w: 351904, W: 91412 } }, Collection: { acquireCount: { w: 43 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1314ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.068-0400 m31100| 2015-07-09T13:57:35.068-0400 I COMMAND [conn32] CMD: reIndex db26.reindex_background_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.069-0400 m31100| 2015-07-09T13:57:35.068-0400 I COMMAND [conn48] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_6", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:43 reslen:173 locks:{ Global: { acquireCount: { r: 45, w: 45 } }, Database: { acquireCount: { w: 45, W: 2 }, acquireWaitCount: { w: 15, W: 2 }, timeAcquiringMicros: { w: 402513, W: 78199 } }, Collection: { acquireCount: { w: 44 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1380ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.069-0400 m31100| 2015-07-09T13:57:35.069-0400 I COMMAND [conn36] CMD: reIndex db26.reindex_background_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.070-0400 m31100| 2015-07-09T13:57:35.070-0400 I COMMAND [conn39] CMD: reIndex db26.reindex_background_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.071-0400 m31100| 2015-07-09T13:57:35.071-0400 I INDEX [conn54] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.078-0400 m31100| 2015-07-09T13:57:35.077-0400 I INDEX [conn56] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.081-0400 m31100| 2015-07-09T13:57:35.081-0400 I INDEX [conn57] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.082-0400 m31102| 2015-07-09T13:57:35.082-0400 I INDEX [repl index builder 96] build index done. scanned 1000 total records. 
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.092-0400 m31100| 2015-07-09T13:57:35.091-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.092-0400 m31100| 2015-07-09T13:57:35.091-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.099-0400 m31100| 2015-07-09T13:57:35.098-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_14", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.099-0400 m31100| 2015-07-09T13:57:35.098-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.102-0400 m31101| 2015-07-09T13:57:35.101-0400 I INDEX [repl index builder 108] build index on: db26.reindex_background_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_7", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.105-0400 m31100| 2015-07-09T13:57:35.103-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_14", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.105-0400 m31100| 2015-07-09T13:57:35.103-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.107-0400 m31100| 2015-07-09T13:57:35.107-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_14", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.107-0400 m31100| 2015-07-09T13:57:35.107-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.108-0400 m31102| 2015-07-09T13:57:35.107-0400 I INDEX [repl index builder 108] build index on: db26.reindex_background_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_7", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.133-0400 m31102| 2015-07-09T13:57:35.133-0400 I INDEX [repl index builder 97] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.135-0400 m31101| 2015-07-09T13:57:35.135-0400 I INDEX [repl index builder 96] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.138-0400 m31102| 2015-07-09T13:57:35.137-0400 I INDEX [repl index builder 98] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.156-0400 m31101| 2015-07-09T13:57:35.155-0400 I INDEX [repl index builder 98] build index done. scanned 1000 total records. 
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.178-0400 m31101| 2015-07-09T13:57:35.177-0400 I INDEX [repl index builder 109] build index on: db26.reindex_background_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_10", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.179-0400 m31102| 2015-07-09T13:57:35.179-0400 I INDEX [repl index builder 109] build index on: db26.reindex_background_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_10", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.182-0400 m31101| 2015-07-09T13:57:35.182-0400 I INDEX [repl index builder 97] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.217-0400 m31102| 2015-07-09T13:57:35.216-0400 I INDEX [repl index builder 99] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.220-0400 m31100| 2015-07-09T13:57:35.220-0400 I INDEX [conn32] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.222-0400 m31100| 2015-07-09T13:57:35.221-0400 I COMMAND [conn32] command db26.reindex_background_14 command: reIndex { reIndex: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 15641 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.222-0400 m31100| 2015-07-09T13:57:35.222-0400 I COMMAND [conn32] CMD: reIndex db26.reindex_background_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.228-0400 m31100| 2015-07-09T13:57:35.227-0400 I INDEX [conn36] build index on: db26.reindex_background_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.228-0400 m31100| 2015-07-09T13:57:35.227-0400 I INDEX [conn36] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.232-0400 m31101| 2015-07-09T13:57:35.231-0400 I INDEX [repl index builder 100] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.233-0400 m31100| 2015-07-09T13:57:35.233-0400 I INDEX [conn36] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.233-0400 m31100| 2015-07-09T13:57:35.233-0400 I INDEX [conn36] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.234-0400 m31102| 2015-07-09T13:57:35.233-0400 I INDEX [repl index builder 100] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.235-0400 m31101| 2015-07-09T13:57:35.235-0400 I INDEX [repl index builder 99] build index done. scanned 1000 total records. 
1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.238-0400 m31100| 2015-07-09T13:57:35.237-0400 I INDEX [conn36] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.238-0400 m31100| 2015-07-09T13:57:35.238-0400 I INDEX [conn36] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.239-0400 m31102| 2015-07-09T13:57:35.239-0400 I INDEX [repl index builder 101] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.243-0400 m31100| 2015-07-09T13:57:35.242-0400 I INDEX [conn36] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.243-0400 m31100| 2015-07-09T13:57:35.242-0400 I INDEX [conn36] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.246-0400 m31101| 2015-07-09T13:57:35.246-0400 I INDEX [repl index builder 110] build index on: db26.reindex_background_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_0", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.251-0400 m31102| 2015-07-09T13:57:35.251-0400 I INDEX [repl index builder 110] build index on: db26.reindex_background_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_0", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.272-0400 m31101| 2015-07-09T13:57:35.272-0400 I INDEX [repl index builder 101] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.315-0400 m31102| 2015-07-09T13:57:35.314-0400 I INDEX [repl index builder 111] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.315-0400 m31101| 2015-07-09T13:57:35.314-0400 I INDEX [repl index builder 111] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.325-0400 m31102| 2015-07-09T13:57:35.325-0400 I INDEX [repl index builder 103] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.326-0400 m31102| 2015-07-09T13:57:35.326-0400 I INDEX [repl index builder 102] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.340-0400 m31102| 2015-07-09T13:57:35.339-0400 I INDEX [repl index builder 104] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.355-0400 m31100| 2015-07-09T13:57:35.355-0400 I INDEX [conn36] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.357-0400 m31100| 2015-07-09T13:57:35.356-0400 I COMMAND [conn36] command db26.reindex_background_8 command: reIndex { reIndex: "reindex_background_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151879 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 287ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.363-0400 m31100| 2015-07-09T13:57:35.363-0400 I INDEX [conn39] build index on: db26.reindex_background_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.364-0400 m31100| 2015-07-09T13:57:35.363-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.366-0400 m31101| 2015-07-09T13:57:35.366-0400 I INDEX [repl index builder 103] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.368-0400 m31100| 2015-07-09T13:57:35.368-0400 I INDEX [conn39] build index on: db26.reindex_background_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_6", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.369-0400 m31100| 2015-07-09T13:57:35.368-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.373-0400 m31101| 2015-07-09T13:57:35.373-0400 I INDEX [repl index builder 102] build index done. scanned 1000 total records. 1 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.374-0400 m31100| 2015-07-09T13:57:35.374-0400 I INDEX [conn39] build index on: db26.reindex_background_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_6", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.374-0400 m31100| 2015-07-09T13:57:35.374-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.380-0400 m31100| 2015-07-09T13:57:35.379-0400 I INDEX [conn39] build index on: db26.reindex_background_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_6", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.380-0400 m31100| 2015-07-09T13:57:35.379-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.381-0400 m31102| 2015-07-09T13:57:35.380-0400 I INDEX [repl index builder 112] build index on: db26.reindex_background_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_13", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.393-0400 m31101| 2015-07-09T13:57:35.393-0400 I INDEX [repl index builder 112] build index on: db26.reindex_background_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_13", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.434-0400 m31101| 2015-07-09T13:57:35.433-0400 I INDEX [repl index builder 105] build index done. scanned 1000 total records. 
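
The reIndex replies are small (reslen:671 here) because they only echo the rebuilt index specs. A sketch of inspecting one directly, with the reply shape in the comment being an assumption about this server generation rather than something visible in the log:

    var res = db.getSiblingDB("db26").reindex_background_8.reIndex();
    printjson(res);   // expected shape: index counts plus the specs, e.g. { nIndexes: 4, indexes: [ ... ], ok: 1 }
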
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.447-0400 m31102| 2015-07-09T13:57:35.446-0400 I INDEX [repl index builder 113] build index on: db26.reindex_background_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_12", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.453-0400 m31102| 2015-07-09T13:57:35.453-0400 I INDEX [repl index builder 105] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.455-0400 m31101| 2015-07-09T13:57:35.455-0400 I INDEX [repl index builder 104] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.462-0400 m31101| 2015-07-09T13:57:35.462-0400 I INDEX [repl index builder 113] build index on: db26.reindex_background_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_12", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.485-0400 m31100| 2015-07-09T13:57:35.485-0400 I INDEX [conn39] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.489-0400 m31100| 2015-07-09T13:57:35.486-0400 I COMMAND [conn39] command db26.reindex_background_6 command: reIndex { reIndex: "reindex_background_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 286266 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 416ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.490-0400 m31100| 2015-07-09T13:57:35.487-0400 I COMMAND [conn54] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_2", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:38 reslen:173 locks:{ Global: { acquireCount: { r: 40, w: 40 } }, Database: { acquireCount: { w: 40, W: 2 }, acquireWaitCount: { w: 12, W: 2 }, timeAcquiringMicros: { w: 325952, W: 440986 } }, Collection: { acquireCount: { w: 39 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1581ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.490-0400 m31100| 2015-07-09T13:57:35.488-0400 I COMMAND [conn56] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_1", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:44 reslen:173 locks:{ Global: { acquireCount: { r: 46, w: 46 } }, Database: { acquireCount: { w: 46, W: 2 }, acquireWaitCount: { w: 16, W: 2 }, timeAcquiringMicros: { w: 345307, W: 439559 } }, Collection: { acquireCount: { w: 45 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1736ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.491-0400 m31100| 2015-07-09T13:57:35.490-0400 I COMMAND [conn38] CMD: reIndex db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.491-0400 m31100| 2015-07-09T13:57:35.491-0400 I COMMAND [conn57] command db26.$cmd command: createIndexes { createIndexes: "reindex_background_3", indexes: [ { key: { integer: 1.0 }, name: "integer_1", background: true } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:37 reslen:173 locks:{ Global: { 
acquireCount: { r: 39, w: 39 } }, Database: { acquireCount: { w: 39, W: 2 }, acquireWaitCount: { w: 10, W: 2 }, timeAcquiringMicros: { w: 318239, W: 429583 } }, Collection: { acquireCount: { w: 38 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1548ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.492-0400 m31100| 2015-07-09T13:57:35.492-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.497-0400 m31100| 2015-07-09T13:57:35.497-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.497-0400 m31100| 2015-07-09T13:57:35.497-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.502-0400 m31100| 2015-07-09T13:57:35.501-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_14", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.503-0400 m31100| 2015-07-09T13:57:35.501-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.508-0400 m31102| 2015-07-09T13:57:35.507-0400 I INDEX [repl index builder 114] build index on: db26.reindex_background_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_14", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.508-0400 m31100| 2015-07-09T13:57:35.507-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_14", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.508-0400 m31100| 2015-07-09T13:57:35.507-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.514-0400 m31100| 2015-07-09T13:57:35.514-0400 I INDEX [conn32] build index on: db26.reindex_background_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_14", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.514-0400 m31100| 2015-07-09T13:57:35.514-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.529-0400 m31101| 2015-07-09T13:57:35.528-0400 I INDEX [repl index builder 114] build index on: db26.reindex_background_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_14", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.571-0400 m31102| 2015-07-09T13:57:35.570-0400 I INDEX [repl index builder 115] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.596-0400 m31101| 2015-07-09T13:57:35.595-0400 I INDEX [repl index builder 115] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.605-0400 
m31101| 2015-07-09T13:57:35.605-0400 I INDEX [repl index builder 107] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.611-0400 m31101| 2015-07-09T13:57:35.610-0400 I INDEX [repl index builder 106] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.622-0400 m31102| 2015-07-09T13:57:35.622-0400 I INDEX [repl index builder 106] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.626-0400 m31100| 2015-07-09T13:57:35.626-0400 I INDEX [conn32] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.637-0400 m31100| 2015-07-09T13:57:35.636-0400 I QUERY [conn58] query db26.reindex_background_10 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2251271485378 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 572041 } }, Collection: { acquireCount: { r: 9 } } } 567ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.639-0400 m31100| 2015-07-09T13:57:35.636-0400 I QUERY [conn46] query db26.reindex_background_0 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2264119997415 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 571150 } }, Collection: { acquireCount: { r: 9 } } } 567ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.639-0400 m31100| 2015-07-09T13:57:35.637-0400 I QUERY [conn60] query db26.reindex_background_5 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2267934596215 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 556988 } }, Collection: { acquireCount: { r: 9 } } } 567ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.640-0400 m31100| 2015-07-09T13:57:35.637-0400 I QUERY [conn20] query db26.reindex_background_11 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2237717376089 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 577947 } }, Collection: { acquireCount: { r: 9 } } } 568ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.640-0400 m31100| 2015-07-09T13:57:35.637-0400 I COMMAND [conn32] command db26.reindex_background_14 command: reIndex { reIndex: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 268800 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 415ms [js_test:fsm_all_sharded_replication] 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.641-0400 m31100| 2015-07-09T13:57:35.638-0400 I QUERY [conn50] query db26.reindex_background_4 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2225297221543 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 578931 } }, Collection: { acquireCount: { r: 9 } } } 569ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.641-0400 m31100| 2015-07-09T13:57:35.640-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63201 #136 (80 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.642-0400 m31100| 2015-07-09T13:57:35.641-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63202 #137 (81 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.644-0400 m31102| 2015-07-09T13:57:35.644-0400 I INDEX [repl index builder 116] build index on: db26.reindex_background_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_6", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.651-0400 m31100| 2015-07-09T13:57:35.650-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_1" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.651-0400 m31100| 2015-07-09T13:57:35.650-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.652-0400 m31102| 2015-07-09T13:57:35.652-0400 I INDEX [repl index builder 107] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.655-0400 m31100| 2015-07-09T13:57:35.655-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.655-0400 m31100| 2015-07-09T13:57:35.655-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.656-0400 m31102| 2015-07-09T13:57:35.655-0400 I INDEX [repl index builder 108] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.656-0400 m31101| 2015-07-09T13:57:35.656-0400 I INDEX [repl index builder 109] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.660-0400 m31100| 2015-07-09T13:57:35.659-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.660-0400 m31100| 2015-07-09T13:57:35.659-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.664-0400 m31100| 2015-07-09T13:57:35.663-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.664-0400 m31100| 2015-07-09T13:57:35.663-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.667-0400 m31101| 2015-07-09T13:57:35.667-0400 I INDEX [repl index builder 116] build index on: db26.reindex_background_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_6", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.687-0400 m31101| 2015-07-09T13:57:35.687-0400 I INDEX [repl index builder 108] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.714-0400 m31102| 2015-07-09T13:57:35.714-0400 I INDEX [repl index builder 117] build index on: db26.reindex_background_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_2", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.736-0400 m31101| 2015-07-09T13:57:35.735-0400 I INDEX [repl index builder 117] build index on: db26.reindex_background_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_2", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.761-0400 m31102| 2015-07-09T13:57:35.761-0400 I INDEX [repl index builder 109] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.766-0400 m31100| 2015-07-09T13:57:35.766-0400 I INDEX [conn38] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.767-0400 m31100| 2015-07-09T13:57:35.766-0400 I COMMAND [conn38] command db26.reindex_background_1 command: reIndex { reIndex: "reindex_background_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149457 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 276ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.767-0400 m31100| 2015-07-09T13:57:35.767-0400 I COMMAND [conn38] CMD: reIndex db26.reindex_background_1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.773-0400 m31100| 2015-07-09T13:57:35.772-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.773-0400 m31100| 2015-07-09T13:57:35.772-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.777-0400 m31101| 2015-07-09T13:57:35.777-0400 I INDEX [repl index builder 110] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.779-0400 m31102| 2015-07-09T13:57:35.778-0400 I INDEX [repl index builder 118] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.780-0400 m31100| 2015-07-09T13:57:35.779-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.780-0400 m31100| 2015-07-09T13:57:35.779-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.784-0400 m31100| 2015-07-09T13:57:35.782-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.784-0400 m31100| 2015-07-09T13:57:35.782-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.788-0400 m31100| 2015-07-09T13:57:35.787-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.788-0400 m31100| 2015-07-09T13:57:35.787-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.819-0400 m31102| 2015-07-09T13:57:35.819-0400 I INDEX [repl index builder 110] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.822-0400 m31101| 2015-07-09T13:57:35.821-0400 I INDEX [repl index builder 118] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.843-0400 m31102| 2015-07-09T13:57:35.842-0400 I INDEX [repl index builder 119] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.883-0400 m31101| 2015-07-09T13:57:35.883-0400 I INDEX [repl index builder 119] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.895-0400 m30998| 2015-07-09T13:57:35.895-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:35.894-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.895-0400 m31100| 2015-07-09T13:57:35.895-0400 I INDEX [conn34] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.916-0400 m31100| 2015-07-09T13:57:35.907-0400 I QUERY [conn86] getmore db26.reindex_background_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2273874475064 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 812875 } }, Collection: { acquireCount: { r: 8 } } } 831ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.916-0400 m31100| 2015-07-09T13:57:35.907-0400 I QUERY [conn59] query db26.reindex_background_9 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2243383312709 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 837847 } }, Collection: { acquireCount: { r: 9 } } } 837ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.917-0400 m31100| 2015-07-09T13:57:35.907-0400 I COMMAND [conn34] command db26.reindex_background_3 command: reIndex { reIndex: "reindex_background_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 274668 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 415ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.917-0400 m31100| 2015-07-09T13:57:35.915-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63203 #138 (82 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.917-0400 m31100| 2015-07-09T13:57:35.915-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.917-0400 m31100| 2015-07-09T13:57:35.915-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63204 #139 (83 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.922-0400 m31102| 2015-07-09T13:57:35.922-0400 I INDEX [repl index builder 111] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.924-0400 m31100| 2015-07-09T13:57:35.924-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_1" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.925-0400 m31100| 2015-07-09T13:57:35.924-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.932-0400 m31101| 2015-07-09T13:57:35.931-0400 I INDEX [repl index builder 111] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.932-0400 m31100| 2015-07-09T13:57:35.931-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.932-0400 m31100| 2015-07-09T13:57:35.931-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.936-0400 m31100| 2015-07-09T13:57:35.936-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.937-0400 m31100| 2015-07-09T13:57:35.936-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.941-0400 m31100| 2015-07-09T13:57:35.941-0400 I INDEX [conn38] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.941-0400 m31100| 2015-07-09T13:57:35.941-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.979-0400 m31101| 2015-07-09T13:57:35.979-0400 I INDEX [repl index builder 112] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:35.996-0400 m31102| 2015-07-09T13:57:35.995-0400 I INDEX [repl index builder 112] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.007-0400 m31101| 2015-07-09T13:57:36.006-0400 I INDEX [repl index builder 113] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.019-0400 m31102| 2015-07-09T13:57:36.019-0400 I INDEX [repl index builder 113] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.045-0400 m31100| 2015-07-09T13:57:36.045-0400 I INDEX [conn38] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.052-0400 m31100| 2015-07-09T13:57:36.052-0400 I COMMAND [conn38] command db26.reindex_background_1 command: reIndex { reIndex: "reindex_background_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149983 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 281ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.058-0400 m31100| 2015-07-09T13:57:36.056-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63205 #140 (84 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.065-0400 m31100| 2015-07-09T13:57:36.064-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.065-0400 m31100| 2015-07-09T13:57:36.064-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.069-0400 m31100| 2015-07-09T13:57:36.069-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.069-0400 m31100| 2015-07-09T13:57:36.069-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.075-0400 m31100| 2015-07-09T13:57:36.074-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.075-0400 m31100| 2015-07-09T13:57:36.075-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.079-0400 m31100| 2015-07-09T13:57:36.079-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.080-0400 m31100| 2015-07-09T13:57:36.079-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.096-0400 m31101| 2015-07-09T13:57:36.094-0400 I INDEX [repl index builder 114] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.104-0400 m31102| 2015-07-09T13:57:36.104-0400 I INDEX [repl index builder 114] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.114-0400 m31102| 2015-07-09T13:57:36.114-0400 I INDEX [repl index builder 115] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.136-0400 m31101| 2015-07-09T13:57:36.135-0400 I INDEX [repl index builder 115] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.143-0400 m31101| 2015-07-09T13:57:36.142-0400 I INDEX [repl index builder 116] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.143-0400 m31102| 2015-07-09T13:57:36.143-0400 I INDEX [repl index builder 117] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.146-0400 m31102| 2015-07-09T13:57:36.146-0400 I INDEX [repl index builder 116] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.151-0400 m31101| 2015-07-09T13:57:36.151-0400 I INDEX [repl index builder 117] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.166-0400 m31102| 2015-07-09T13:57:36.166-0400 I INDEX [repl index builder 118] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.167-0400 m31102| 2015-07-09T13:57:36.167-0400 I INDEX [repl index builder 119] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.170-0400 m31101| 2015-07-09T13:57:36.170-0400 I INDEX [repl index builder 118] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.173-0400 m31101| 2015-07-09T13:57:36.173-0400 I INDEX [repl index builder 119] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.191-0400 m31100| 2015-07-09T13:57:36.191-0400 I INDEX [conn34] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.223-0400 m31100| 2015-07-09T13:57:36.191-0400 I COMMAND [conn34] command db26.reindex_background_3 command: reIndex { reIndex: "reindex_background_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 142413 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 276ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.225-0400 m31100| 2015-07-09T13:57:36.193-0400 I QUERY [conn74] getmore db26.reindex_background_12 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2282696853767 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 1089408 } }, Collection: { acquireCount: { r: 8 } } } 1116ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.233-0400 m31100| 2015-07-09T13:57:36.208-0400 I QUERY [conn59] query db26.reindex_background_13 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2273472724239 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 268331 } }, Collection: { acquireCount: { r: 9 } } } 158ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.233-0400 m31100| 2015-07-09T13:57:36.212-0400 I QUERY [conn137] getmore db26.reindex_background_8 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2229666136908 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 539587 } }, Collection: { acquireCount: { r: 8 } } } 310ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.234-0400 m31100| 2015-07-09T13:57:36.219-0400 I QUERY [conn138] getmore db26.reindex_background_2 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2233951506690 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 274080 } }, Collection: { acquireCount: { r: 8 } } } 170ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.234-0400 m31100| 2015-07-09T13:57:36.222-0400 I QUERY [conn139] getmore db26.reindex_background_6 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2246867920766 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 273855 } }, Collection: { acquireCount: { r: 8 } } } 173ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.234-0400 m31100| 2015-07-09T13:57:36.223-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63206 #141 (85 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.234-0400 m31100| 2015-07-09T13:57:36.230-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.235-0400 m31100| 2015-07-09T13:57:36.231-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63207 #142 (86 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.250-0400 m31100| 2015-07-09T13:57:36.250-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.251-0400 m31100| 2015-07-09T13:57:36.250-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.257-0400 m31100| 2015-07-09T13:57:36.256-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.257-0400 m31100| 2015-07-09T13:57:36.256-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.261-0400 m31100| 2015-07-09T13:57:36.261-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.262-0400 m31100| 2015-07-09T13:57:36.261-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.265-0400 m31100| 2015-07-09T13:57:36.265-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.265-0400 m31100| 2015-07-09T13:57:36.265-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.390-0400 m31100| 2015-07-09T13:57:36.390-0400 I INDEX [conn34] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.401-0400 m31100| 2015-07-09T13:57:36.401-0400 I COMMAND [conn34] command db26.reindex_background_3 command: reIndex { reIndex: "reindex_background_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 11815 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 170ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.404-0400 m31100| 2015-07-09T13:57:36.403-0400 I QUERY [conn140] getmore db26.reindex_background_14 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2222535883392 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 296073 } }, Collection: { acquireCount: { r: 8 } } } 207ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.436-0400 m31100| 2015-07-09T13:57:36.427-0400 I QUERY [conn135] getmore db26.reindex_background_7 query: { $text: { $search: "ipsum" } } cursorid:2256034542894 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 688272 } }, Collection: { acquireCount: { r: 8 } } } 529ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.450-0400 m31100| 2015-07-09T13:57:36.449-0400 I QUERY [conn134] getmore db26.reindex_background_10 query: { $text: { $search: "ipsum" } } cursorid:2251271485378 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 702257 } }, Collection: { acquireCount: { r: 8 } } } 543ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.456-0400 m31100| 2015-07-09T13:57:36.452-0400 I QUERY [conn43] getmore db26.reindex_background_5 query: { $text: { $search: "ipsum" } } cursorid:2267934596215 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 691112 } }, Collection: { acquireCount: { r: 9 } } } 555ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.497-0400 m31100| 2015-07-09T13:57:36.486-0400 I QUERY [conn86] getmore db26.reindex_background_9 query: { $text: { $search: "ipsum" } } cursorid:2243383312709 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 430873 } }, Collection: { acquireCount: { r: 10 } } } 437ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.497-0400 m31100| 2015-07-09T13:57:36.486-0400 I COMMAND [conn38] CMD: reIndex db26.reindex_background_7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.497-0400 m31100| 2015-07-09T13:57:36.490-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.504-0400 m31100| 2015-07-09T13:57:36.503-0400 I QUERY [conn44] getmore db26.reindex_background_11 query: { $text: { $search: "ipsum" } } cursorid:2237717376089 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 689123 } }, Collection: { acquireCount: { r: 8 } } } 606ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.507-0400 m31100| 2015-07-09T13:57:36.503-0400 I QUERY [conn42] getmore db26.reindex_background_0 query: { $text: { $search: "ipsum" } } cursorid:2264119997415 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 704216 } }, Collection: { acquireCount: { r: 9 } } } 596ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.509-0400 m31100| 2015-07-09T13:57:36.509-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.517-0400 m31100| 2015-07-09T13:57:36.517-0400 I INDEX [conn38] build index on: db26.reindex_background_7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.518-0400 m31100| 2015-07-09T13:57:36.517-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.524-0400 m31100| 2015-07-09T13:57:36.524-0400 I INDEX [conn38] build index on: db26.reindex_background_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_7", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.525-0400 m31100| 2015-07-09T13:57:36.524-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.528-0400 m31100| 2015-07-09T13:57:36.528-0400 I INDEX [conn38] build index on: db26.reindex_background_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_7", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.528-0400 m31100| 2015-07-09T13:57:36.528-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.535-0400 m31100| 2015-07-09T13:57:36.535-0400 I INDEX [conn38] build index on: db26.reindex_background_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_7", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.535-0400 m31100| 2015-07-09T13:57:36.535-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.640-0400 m31100| 2015-07-09T13:57:36.640-0400 I INDEX [conn38] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.641-0400 m31100| 2015-07-09T13:57:36.641-0400 I COMMAND [conn38] command db26.reindex_background_7 command: reIndex { reIndex: "reindex_background_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 6975 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 154ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.646-0400 m31100| 2015-07-09T13:57:36.646-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.647-0400 m31100| 2015-07-09T13:57:36.646-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.651-0400 m31100| 2015-07-09T13:57:36.651-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.652-0400 m31100| 2015-07-09T13:57:36.651-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.658-0400 m31100| 2015-07-09T13:57:36.658-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.658-0400 m31100| 2015-07-09T13:57:36.658-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.664-0400 m31100| 2015-07-09T13:57:36.664-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.664-0400 m31100| 2015-07-09T13:57:36.664-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.769-0400 m31100| 2015-07-09T13:57:36.768-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.773-0400 m31100| 2015-07-09T13:57:36.769-0400 I COMMAND [conn35] command db26.reindex_background_8 command: reIndex { reIndex: "reindex_background_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 137140 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 279ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.773-0400 m31100| 2015-07-09T13:57:36.769-0400 I COMMAND [conn59] command db26.reindex_background_13 command: listIndexes { listIndexes: "reindex_background_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 265331 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 277ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.774-0400 m31100| 2015-07-09T13:57:36.769-0400 I COMMAND [conn60] command db26.reindex_background_9 command: listIndexes { listIndexes: "reindex_background_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 265171 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 275ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.774-0400 m31100| 2015-07-09T13:57:36.769-0400 I COMMAND [conn20] command db26.reindex_background_11 command: listIndexes { listIndexes: "reindex_background_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 260059 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 260ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.774-0400 m31100| 2015-07-09T13:57:36.771-0400 I COMMAND [conn48] command db26.reindex_background_2 command: listIndexes { listIndexes: "reindex_background_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 255714 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 255ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.775-0400 m31100| 2015-07-09T13:57:36.772-0400 I COMMAND [conn46] command db26.reindex_background_0 command: listIndexes { listIndexes: "reindex_background_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 260678 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 260ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.779-0400 m31100| 2015-07-09T13:57:36.776-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_8
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.779-0400 m31100| 2015-07-09T13:57:36.777-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.779-0400 m31100| 2015-07-09T13:57:36.778-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.779-0400 m31100| 2015-07-09T13:57:36.779-0400 I COMMAND [conn37] CMD: reIndex db26.reindex_background_13
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.779-0400 m31100| 2015-07-09T13:57:36.779-0400 I COMMAND [conn38] CMD: reIndex db26.reindex_background_9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.789-0400 m31100| 2015-07-09T13:57:36.788-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.789-0400 m31100| 2015-07-09T13:57:36.788-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.794-0400 m31100| 2015-07-09T13:57:36.793-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.794-0400 m31100| 2015-07-09T13:57:36.793-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.799-0400 m31100| 2015-07-09T13:57:36.799-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.799-0400 m31100| 2015-07-09T13:57:36.799-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.800-0400 m31100| 2015-07-09T13:57:36.799-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:36.797-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.805-0400 m31100| 2015-07-09T13:57:36.804-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.805-0400 m31100| 2015-07-09T13:57:36.805-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:36.907-0400 m31100| 2015-07-09T13:57:36.907-0400 I INDEX [conn34] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.149-0400 m31100| 2015-07-09T13:57:36.914-0400 I QUERY [conn138] getmore db26.reindex_background_6 query: { $text: { $search: "ipsum" } } cursorid:2246724152222 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 396063 } }, Collection: { acquireCount: { r: 9 } } } 487ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.149-0400 m31100| 2015-07-09T13:57:36.920-0400 I COMMAND [conn34] command db26.reindex_background_3 command: reIndex { reIndex: "reindex_background_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 273625 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 411ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.149-0400 m31100| 2015-07-09T13:57:36.920-0400 I QUERY [conn136] getmore db26.reindex_background_4 query: { $text: { $search: "ipsum" } } cursorid:2225297221543 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 6 }, timeAcquiringMicros: { r: 1088671 } }, Collection: { acquireCount: { r: 9 } } } 1018ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.149-0400 m31100| 2015-07-09T13:57:36.924-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.150-0400 m31100| 2015-07-09T13:57:36.930-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_8" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.150-0400 m31100| 2015-07-09T13:57:36.931-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.150-0400 m31100| 2015-07-09T13:57:36.934-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.150-0400 m31100| 2015-07-09T13:57:36.935-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.150-0400 m31100| 2015-07-09T13:57:36.939-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.150-0400 m31100| 2015-07-09T13:57:36.939-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.151-0400 m31100| 2015-07-09T13:57:36.943-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.151-0400 m31100| 2015-07-09T13:57:36.943-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.151-0400 m31100| 2015-07-09T13:57:37.065-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.151-0400 m31100| 2015-07-09T13:57:37.065-0400 I COMMAND [conn35] command db26.reindex_background_8 command: reIndex { reIndex: "reindex_background_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148928 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 288ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.151-0400 m31100| 2015-07-09T13:57:37.071-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_11" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.151-0400 m31100| 2015-07-09T13:57:37.071-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.152-0400 m31100| 2015-07-09T13:57:37.076-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.152-0400 m31100| 2015-07-09T13:57:37.076-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.152-0400 m31100| 2015-07-09T13:57:37.082-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.152-0400 m31100| 2015-07-09T13:57:37.082-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.152-0400 m31100| 2015-07-09T13:57:37.086-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.152-0400 m31100| 2015-07-09T13:57:37.086-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.202-0400 m31100| 2015-07-09T13:57:37.202-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.202-0400 m31100| 2015-07-09T13:57:37.202-0400 I COMMAND [conn40] command db26.reindex_background_11 command: reIndex { reIndex: "reindex_background_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 288071 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 424ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.204-0400 m31100| 2015-07-09T13:57:37.204-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.210-0400 m31100| 2015-07-09T13:57:37.209-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_0" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.210-0400 m31100| 2015-07-09T13:57:37.209-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.217-0400 m31100| 2015-07-09T13:57:37.217-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_0", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.217-0400 m31100| 2015-07-09T13:57:37.217-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.219-0400 m31200| 2015-07-09T13:57:37.219-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:57:37.212-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.224-0400 m31100| 2015-07-09T13:57:37.222-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_0", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.224-0400 m31100| 2015-07-09T13:57:37.222-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.227-0400 m31100| 2015-07-09T13:57:37.227-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_0", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.228-0400 m31100| 2015-07-09T13:57:37.227-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.328-0400 m31100| 2015-07-09T13:57:37.328-0400 I INDEX [conn132] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.329-0400 m31100| 2015-07-09T13:57:37.328-0400 I COMMAND [conn132] command db26.reindex_background_0 command: reIndex { reIndex: "reindex_background_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 423499 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 549ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.329-0400 m31100| 2015-07-09T13:57:37.329-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.335-0400 m31100| 2015-07-09T13:57:37.335-0400 I INDEX [conn37] build index on: db26.reindex_background_13 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_13" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.335-0400 m31100| 2015-07-09T13:57:37.335-0400 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.341-0400 m31100| 2015-07-09T13:57:37.340-0400 I INDEX [conn37] build index on: db26.reindex_background_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_13", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.341-0400 m31100| 2015-07-09T13:57:37.340-0400 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.346-0400 m31100| 2015-07-09T13:57:37.345-0400 I INDEX [conn37] build index on: db26.reindex_background_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_13", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.346-0400 m31100| 2015-07-09T13:57:37.346-0400 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.352-0400 m31100| 2015-07-09T13:57:37.351-0400 I INDEX [conn37] build index on: db26.reindex_background_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_13", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.352-0400 m31100| 2015-07-09T13:57:37.351-0400 I INDEX [conn37] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.452-0400 m31100| 2015-07-09T13:57:37.451-0400 I INDEX [conn37] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.453-0400 m31100| 2015-07-09T13:57:37.452-0400 I COMMAND [conn37] command db26.reindex_background_13 command: reIndex { reIndex: "reindex_background_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1050768 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 673ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.459-0400 m31100| 2015-07-09T13:57:37.459-0400 I INDEX [conn38] build index on: db26.reindex_background_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_9" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.460-0400 m31100| 2015-07-09T13:57:37.459-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.464-0400 m31100| 2015-07-09T13:57:37.463-0400 I INDEX [conn38] build index on: db26.reindex_background_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_9", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.465-0400 m31100| 2015-07-09T13:57:37.463-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.471-0400 m31100| 2015-07-09T13:57:37.470-0400 I INDEX [conn38] build index on: db26.reindex_background_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_9", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.471-0400 m31100| 2015-07-09T13:57:37.470-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.474-0400 m31100| 2015-07-09T13:57:37.474-0400 I INDEX [conn38] build index on: db26.reindex_background_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_9", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.474-0400 m31100| 2015-07-09T13:57:37.474-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.581-0400 m31100| 2015-07-09T13:57:37.580-0400 I INDEX [conn38] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.585-0400 m31100| 2015-07-09T13:57:37.582-0400 I COMMAND [conn38] command db26.reindex_background_9 command: reIndex { reIndex: "reindex_background_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1174313 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 802ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.588-0400 m31100| 2015-07-09T13:57:37.588-0400 I COMMAND [conn48] command db26.reindex_background_4 command: listIndexes { listIndexes: "reindex_background_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1161241 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 660ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.589-0400 m31100| 2015-07-09T13:57:37.588-0400 I COMMAND [conn73] command db26.reindex_background_6 command: listIndexes { listIndexes: "reindex_background_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1159276 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 658ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.589-0400 m31100| 2015-07-09T13:57:37.589-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_4
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.591-0400 m31100| 2015-07-09T13:57:37.590-0400 I QUERY [conn135] getmore db26.reindex_background_1 query: { $text: { $search: "ipsum" } } cursorid:2260971036458 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 1554402 } }, Collection: { acquireCount: { r: 8 } } } 1139ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.597-0400 m31100| 2015-07-09T13:57:37.596-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.597-0400 m31100| 2015-07-09T13:57:37.596-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.602-0400 m31100| 2015-07-09T13:57:37.600-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.602-0400 m31100| 2015-07-09T13:57:37.600-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.605-0400 m31100| 2015-07-09T13:57:37.605-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.606-0400 m31100| 2015-07-09T13:57:37.605-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.612-0400 m31100| 2015-07-09T13:57:37.612-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.612-0400 m31100| 2015-07-09T13:57:37.612-0400 I INDEX [conn34] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.709-0400 m31100| 2015-07-09T13:57:37.709-0400 I INDEX [conn34] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.710-0400 m31100| 2015-07-09T13:57:37.710-0400 I COMMAND [conn34] command db26.reindex_background_3 command: reIndex { reIndex: "reindex_background_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1166848 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 785ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.717-0400 m31100| 2015-07-09T13:57:37.717-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_11" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.718-0400 m31100| 2015-07-09T13:57:37.717-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.723-0400 m31100| 2015-07-09T13:57:37.721-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.723-0400 m31100| 2015-07-09T13:57:37.721-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.728-0400 m31100| 2015-07-09T13:57:37.728-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.728-0400 m31100| 2015-07-09T13:57:37.728-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.734-0400 m31100| 2015-07-09T13:57:37.733-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.734-0400 m31100| 2015-07-09T13:57:37.733-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.835-0400 m31100| 2015-07-09T13:57:37.835-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.836-0400 m31100| 2015-07-09T13:57:37.835-0400 I COMMAND [conn40] command db26.reindex_background_11 command: reIndex { reIndex: "reindex_background_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1006891 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 631ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.837-0400 m31100| 2015-07-09T13:57:37.837-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_11
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.842-0400 m31100| 2015-07-09T13:57:37.841-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_0" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.842-0400 m31100| 2015-07-09T13:57:37.841-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.846-0400 m31100| 2015-07-09T13:57:37.845-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_0", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.847-0400 m31100| 2015-07-09T13:57:37.845-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.851-0400 m31100| 2015-07-09T13:57:37.851-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_0", background: true, 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.852-0400 m31100| 2015-07-09T13:57:37.851-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.856-0400 m31100| 2015-07-09T13:57:37.856-0400 I INDEX [conn132] build index on: db26.reindex_background_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_0", background: true }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.856-0400 m31100| 2015-07-09T13:57:37.856-0400 I INDEX [conn132] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.956-0400 m31100| 2015-07-09T13:57:37.955-0400 I INDEX [conn132] build index done. scanned 1000 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.960-0400 m31100| 2015-07-09T13:57:37.959-0400 I COMMAND [conn132] command db26.reindex_background_0 command: reIndex { reIndex: "reindex_background_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1007095 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 629ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.961-0400 m31100| 2015-07-09T13:57:37.960-0400 I QUERY [conn134] getmore db26.reindex_background_12 query: { $text: { $search: "ipsum" } } cursorid:2281762688757 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 1928509 } }, Collection: { acquireCount: { r: 8 } } } 1503ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.964-0400 m31100| 2015-07-09T13:57:37.961-0400 I COMMAND [conn60] command db26.reindex_background_1 command: listIndexes { listIndexes: "reindex_background_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 358093 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 361ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.965-0400 m31100| 2015-07-09T13:57:37.964-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.974-0400 m31100| 2015-07-09T13:57:37.973-0400 I INDEX [conn35] build index on: db26.reindex_background_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.974-0400 m31100| 2015-07-09T13:57:37.973-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.977-0400 m31100| 2015-07-09T13:57:37.976-0400 I INDEX [conn35] build index on: db26.reindex_background_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_4", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.977-0400 m31100| 2015-07-09T13:57:37.976-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.982-0400 m31100| 2015-07-09T13:57:37.981-0400 I INDEX [conn35] build index on: db26.reindex_background_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_4", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.982-0400 m31100| 2015-07-09T13:57:37.981-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.988-0400 m31100| 2015-07-09T13:57:37.987-0400 I INDEX [conn35] build index on: db26.reindex_background_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_4", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:37.988-0400 m31100| 2015-07-09T13:57:37.987-0400 I INDEX [conn35] building index using bulk method 
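The cycle visible above repeats throughout this run: an FSM worker thread issues reIndex against one of the db26.reindex_background_<N> collections, and mongod rebuilds all four of its indexes (_id, a text index on the "text" field, a 2dsphere index on "geo", and an ascending index on "integer") with the bulk builder, scanning the same 1000 documents each time. The following is a minimal mongo-shell sketch of one such cycle; the choice of collection is an arbitrary assumption, but the index specs and command shape are taken from the log entries:

    // Sketch only: one reIndex cycle as seen in the log above. The target
    // collection (reindex_background_4) is an arbitrary pick from the workload.
    var coll = db.getSiblingDB("db26").reindex_background_4;

    // The three secondary indexes the workload declares (the _id index is
    // implicit); the text index appears in the log as { _fts: "text", _ftsx: 1 }.
    coll.createIndex({ text: "text" }, { background: true });
    coll.createIndex({ geo: "2dsphere" }, { background: true });
    coll.createIndex({ integer: 1 }, { background: true });

    // reIndex drops and rebuilds every index on the collection, producing the
    // four "build index on:" / "building index using bulk method" pairs above.
    // It holds the database-exclusive W lock for the duration, so concurrent
    // readers queue; that is the source of the large acquireWaitCount and
    // timeAcquiringMicros values on the other connections.
    printjson(coll.getDB().runCommand({ reIndex: "reindex_background_4" }));

Note the pattern in the command summaries: the rebuild itself finishes in well under a second ("0 secs" at the log's one-second granularity, 1000 records scanned), while timeAcquiringMicros shows lock acquisition accounting for much of each command's reported duration.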
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.089-0400 m31100| 2015-07-09T13:57:38.089-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.090-0400 m31100| 2015-07-09T13:57:38.090-0400 I COMMAND [conn35] command db26.reindex_background_4 command: reIndex { reIndex: "reindex_background_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 377944 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 500ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.097-0400 m31100| 2015-07-09T13:57:38.096-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.097-0400 m31100| 2015-07-09T13:57:38.097-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.103-0400 m31100| 2015-07-09T13:57:38.102-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.103-0400 m31100| 2015-07-09T13:57:38.102-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.107-0400 m31100| 2015-07-09T13:57:38.106-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.107-0400 m31100| 2015-07-09T13:57:38.106-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.112-0400 m31100| 2015-07-09T13:57:38.112-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.112-0400 m31100| 2015-07-09T13:57:38.112-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.221-0400 m31100| 2015-07-09T13:57:38.220-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.223-0400 m31100| 2015-07-09T13:57:38.221-0400 I COMMAND [conn40] command db26.reindex_background_11 command: reIndex { reIndex: "reindex_background_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 252655 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 384ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.225-0400 m31100| 2015-07-09T13:57:38.225-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.227-0400 m31100| 2015-07-09T13:57:38.226-0400 I COMMAND [conn72] command db26.reindex_background_12 command: listIndexes { listIndexes: "reindex_background_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 249545 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 251ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.229-0400 m31100| 2015-07-09T13:57:38.228-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.240-0400 m31100| 2015-07-09T13:57:38.240-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.240-0400 m31100| 2015-07-09T13:57:38.240-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.245-0400 m31100| 2015-07-09T13:57:38.245-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.246-0400 m31100| 2015-07-09T13:57:38.245-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.250-0400 m31100| 2015-07-09T13:57:38.249-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.250-0400 m31100| 2015-07-09T13:57:38.250-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.258-0400 m31100| 2015-07-09T13:57:38.258-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.259-0400 m31100| 2015-07-09T13:57:38.258-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.364-0400 m31100| 2015-07-09T13:57:38.363-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.367-0400 m31100| 2015-07-09T13:57:38.367-0400 I COMMAND [conn34] command db26.reindex_background_1 command: reIndex { reIndex: "reindex_background_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 268378 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 402ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.368-0400 m31100| 2015-07-09T13:57:38.368-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.380-0400 m31100| 2015-07-09T13:57:38.379-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.380-0400 m31100| 2015-07-09T13:57:38.379-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.384-0400 m31100| 2015-07-09T13:57:38.384-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.385-0400 m31100| 2015-07-09T13:57:38.384-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.390-0400 m31100| 2015-07-09T13:57:38.389-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.391-0400 m31100| 2015-07-09T13:57:38.389-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.397-0400 m31100| 2015-07-09T13:57:38.396-0400 I INDEX [conn40] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.397-0400 m31100| 2015-07-09T13:57:38.396-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.502-0400 m31100| 2015-07-09T13:57:38.501-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.502-0400 m31100| 2015-07-09T13:57:38.502-0400 I COMMAND [conn40] command db26.reindex_background_11 command: reIndex { reIndex: "reindex_background_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 147378 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 276ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.508-0400 m31100| 2015-07-09T13:57:38.507-0400 I INDEX [conn35] build index on: db26.reindex_background_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.509-0400 m31100| 2015-07-09T13:57:38.507-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.515-0400 m31100| 2015-07-09T13:57:38.515-0400 I INDEX [conn35] build index on: db26.reindex_background_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_12", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.515-0400 m31100| 2015-07-09T13:57:38.515-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.519-0400 m31100| 2015-07-09T13:57:38.519-0400 I INDEX [conn35] build index on: db26.reindex_background_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_12", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.520-0400 m31100| 2015-07-09T13:57:38.519-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.524-0400 m31100| 2015-07-09T13:57:38.523-0400 I INDEX [conn35] build index on: db26.reindex_background_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_12", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.524-0400 m31100| 2015-07-09T13:57:38.523-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.623-0400 m31100| 2015-07-09T13:57:38.622-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.627-0400 m31100| 2015-07-09T13:57:38.623-0400 I COMMAND [conn35] command db26.reindex_background_12 command: reIndex { reIndex: "reindex_background_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 273596 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 394ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.636-0400 m31100| 2015-07-09T13:57:38.635-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.636-0400 m31100| 2015-07-09T13:57:38.636-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.642-0400 m31100| 2015-07-09T13:57:38.641-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.642-0400 m31100| 2015-07-09T13:57:38.641-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.646-0400 m31100| 2015-07-09T13:57:38.646-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.647-0400 m31100| 2015-07-09T13:57:38.646-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.652-0400 m31100| 2015-07-09T13:57:38.652-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.652-0400 m31100| 2015-07-09T13:57:38.652-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.753-0400 m31100| 2015-07-09T13:57:38.753-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.758-0400 m31100| 2015-07-09T13:57:38.756-0400 I COMMAND [conn34] command db26.reindex_background_1 command: reIndex { reIndex: "reindex_background_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 261899 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 387ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.759-0400 m31100| 2015-07-09T13:57:38.757-0400 I QUERY [conn140] getmore db26.reindex_background_10 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2252097842851 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 2719886 } }, Collection: { acquireCount: { r: 8 } } } 1987ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.759-0400 m31100| 2015-07-09T13:57:38.758-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.763-0400 m31100| 2015-07-09T13:57:38.759-0400 I QUERY [conn137] getmore db26.reindex_background_14 query: { $text: { $search: "ipsum" } } cursorid:2221597293878 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 2699144 } }, Collection: { acquireCount: { r: 8 } } } 1989ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.764-0400 m31100| 2015-07-09T13:57:38.760-0400 I QUERY [conn44] getmore db26.reindex_background_7 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2255787024844 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 6 }, timeAcquiringMicros: { r: 2317240 } }, Collection: { acquireCount: { r: 8 } } } 1178ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.767-0400 m31100| 2015-07-09T13:57:38.766-0400 I QUERY [conn134] getmore db26.reindex_background_8 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2230896185580 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 782601 } }, Collection: { acquireCount: { r: 8 } } } 542ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.769-0400 m31100| 2015-07-09T13:57:38.768-0400 I QUERY [conn43] getmore db26.reindex_background_9 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2242238827585 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 
nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 521996 } }, Collection: { acquireCount: { r: 8 } } } 402ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.771-0400 m31100| 2015-07-09T13:57:38.770-0400 I QUERY [conn136] getmore db26.reindex_background_2 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2235133182626 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 1158067 } }, Collection: { acquireCount: { r: 8 } } } 810ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.780-0400 m31100| 2015-07-09T13:57:38.779-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.780-0400 m31100| 2015-07-09T13:57:38.779-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.783-0400 m31100| 2015-07-09T13:57:38.783-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_1", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.784-0400 m31100| 2015-07-09T13:57:38.783-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.789-0400 m31100| 2015-07-09T13:57:38.789-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_1", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.789-0400 m31100| 2015-07-09T13:57:38.789-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.794-0400 m31100| 2015-07-09T13:57:38.794-0400 I INDEX [conn34] build index on: db26.reindex_background_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_1", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.794-0400 m31100| 2015-07-09T13:57:38.794-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.904-0400 m31100| 2015-07-09T13:57:38.904-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.907-0400 m31100| 2015-07-09T13:57:38.907-0400 I COMMAND [conn34] command db26.reindex_background_1 command: reIndex { reIndex: "reindex_background_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.908-0400 m31100| 2015-07-09T13:57:38.907-0400 I QUERY [conn86] getmore db26.reindex_background_5 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2269792616764 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 2451977 } }, Collection: { acquireCount: { r: 8 } } } 1321ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.909-0400 m31100| 2015-07-09T13:57:38.908-0400 I COMMAND [conn73] command db26.reindex_background_14 command: listIndexes { listIndexes: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 129030 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.914-0400 m31100| 2015-07-09T13:57:38.911-0400 I QUERY [conn141] getmore db26.reindex_background_3 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2277996258082 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 658236 } }, Collection: { acquireCount: { r: 8 } } } 545ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.914-0400 m31100| 2015-07-09T13:57:38.912-0400 I QUERY [conn135] getmore db26.reindex_background_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2272654912408 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 918315 } }, Collection: { acquireCount: { r: 8 } } } 690ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.915-0400 m31100| 2015-07-09T13:57:38.913-0400 I QUERY [conn138] getmore db26.reindex_background_6 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2248053667038 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 658422 } }, 
Collection: { acquireCount: { r: 8 } } } 546ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.926-0400 m31100| 2015-07-09T13:57:38.918-0400 I QUERY [conn139] getmore db26.reindex_background_4 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2225079264614 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 519966 } }, Collection: { acquireCount: { r: 8 } } } 291ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.927-0400 m31100| 2015-07-09T13:57:38.921-0400 I QUERY [conn42] getmore db26.reindex_background_0 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2264946191043 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 521552 } }, Collection: { acquireCount: { r: 8 } } } 295ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:38.986-0400 m31100| 2015-07-09T13:57:38.986-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63209 #143 (87 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.079-0400 m31100| 2015-07-09T13:57:39.079-0400 I QUERY [conn43] getmore db26.reindex_background_11 query: { $text: { $search: "ipsum" } } cursorid:2238726890991 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 107ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.090-0400 m31100| 2015-07-09T13:57:39.089-0400 I QUERY [conn135] getmore db26.reindex_background_5 query: { $text: { $search: "ipsum" } } cursorid:2268677248449 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.095-0400 m31100| 2015-07-09T13:57:39.094-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.096-0400 m31100| 2015-07-09T13:57:39.095-0400 I QUERY [conn139] getmore db26.reindex_background_2 query: { $text: { $search: "ipsum" } } cursorid:2234017158160 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.097-0400 m31100| 2015-07-09T13:57:39.096-0400 I QUERY [conn142] getmore db26.reindex_background_13 query: { $text: { $search: "ipsum" } } cursorid:2273804858553 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 111ms [js_test:fsm_all_sharded_replication] 
2015-07-09T13:57:39.104-0400 m31100| 2015-07-09T13:57:39.104-0400 I QUERY [conn86] getmore db26.reindex_background_7 query: { $text: { $search: "ipsum" } } cursorid:2256921377944 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.113-0400 m31100| 2015-07-09T13:57:39.112-0400 I INDEX [conn34] build index on: db26.reindex_background_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.113-0400 m31100| 2015-07-09T13:57:39.112-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.116-0400 m31100| 2015-07-09T13:57:39.115-0400 I INDEX [conn34] build index on: db26.reindex_background_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_11", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.116-0400 m31100| 2015-07-09T13:57:39.115-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.123-0400 m31100| 2015-07-09T13:57:39.122-0400 I INDEX [conn34] build index on: db26.reindex_background_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_11", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.123-0400 m31100| 2015-07-09T13:57:39.122-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.126-0400 m31100| 2015-07-09T13:57:39.126-0400 I INDEX [conn34] build index on: db26.reindex_background_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_11", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.126-0400 m31100| 2015-07-09T13:57:39.126-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.225-0400 m31100| 2015-07-09T13:57:39.225-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.226-0400 m31100| 2015-07-09T13:57:39.225-0400 I COMMAND [conn34] command db26.reindex_background_11 command: reIndex { reIndex: "reindex_background_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 10415 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.226-0400 m31100| 2015-07-09T13:57:39.225-0400 I COMMAND [conn20] command db26.reindex_background_5 command: listIndexes { listIndexes: "reindex_background_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 129887 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.226-0400 m31100| 2015-07-09T13:57:39.225-0400 I COMMAND [conn46] command db26.reindex_background_2 command: listIndexes { listIndexes: "reindex_background_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 121873 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 122ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.227-0400 m31100| 2015-07-09T13:57:39.227-0400 I COMMAND [conn57] command db26.reindex_background_7 command: listIndexes { listIndexes: "reindex_background_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 116373 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.255-0400 m31100| 2015-07-09T13:57:39.245-0400 I QUERY [conn136] getmore db26.reindex_background_8 query: { $text: { $search: "ipsum" } } cursorid:2229731526188 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 140560 } }, Collection: { acquireCount: { r: 9 } } } 304ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.256-0400 m31100| 2015-07-09T13:57:39.251-0400 I COMMAND [conn60] command db26.reindex_background_13 command: listIndexes { listIndexes: "reindex_background_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 124435 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.261-0400 m31100| 2015-07-09T13:57:39.261-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.270-0400 m31100| 2015-07-09T13:57:39.267-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.271-0400 m31100| 2015-07-09T13:57:39.270-0400 I QUERY [conn44] getmore db26.reindex_background_9 query: { $text: { $search: 
"ipsum" } } cursorid:2241995773465 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 170292 } }, Collection: { acquireCount: { r: 10 } } } 332ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.282-0400 m31100| 2015-07-09T13:57:39.282-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.283-0400 m31100| 2015-07-09T13:57:39.282-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.287-0400 m31100| 2015-07-09T13:57:39.286-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.288-0400 m31100| 2015-07-09T13:57:39.286-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.292-0400 m31100| 2015-07-09T13:57:39.292-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.293-0400 m31100| 2015-07-09T13:57:39.292-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.297-0400 m31100| 2015-07-09T13:57:39.297-0400 I INDEX [conn35] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.297-0400 m31100| 2015-07-09T13:57:39.297-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.407-0400 m31100| 2015-07-09T13:57:39.406-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.411-0400 m31100| 2015-07-09T13:57:39.411-0400 I COMMAND [conn35] command db26.reindex_background_8 command: reIndex { reIndex: "reindex_background_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 13664 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.415-0400 m31100| 2015-07-09T13:57:39.414-0400 I COMMAND [conn20] command db26.reindex_background_9 command: listIndexes { listIndexes: "reindex_background_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 134590 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.418-0400 m31100| 2015-07-09T13:57:39.417-0400 I QUERY [conn137] getmore db26.reindex_background_12 query: { $text: { $search: "ipsum" } } cursorid:2282766436163 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 263072 } }, Collection: { acquireCount: { r: 9 } } } 430ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.424-0400 m31100| 2015-07-09T13:57:39.423-0400 I QUERY [conn140] getmore db26.reindex_background_6 query: { $text: { $search: "ipsum" } } cursorid:2246937797160 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 279460 } }, Collection: { acquireCount: { r: 8 } } } 423ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.424-0400 m31100| 2015-07-09T13:57:39.424-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.425-0400 m31100| 2015-07-09T13:57:39.425-0400 I QUERY [conn138] getmore db26.reindex_background_4 query: { $text: { $search: "ipsum" } } cursorid:2226052063536 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 277417 } }, Collection: { acquireCount: { r: 10 } } } 464ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.427-0400 m31100| 2015-07-09T13:57:39.426-0400 I QUERY [conn42] getmore db26.reindex_background_10 query: { $text: { $search: "ipsum" } } cursorid:2250946690676 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 272397 } }, Collection: { acquireCount: { r: 9 } } } 480ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.438-0400 m31100| 2015-07-09T13:57:39.438-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.438-0400 m31100| 
2015-07-09T13:57:39.438-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.444-0400 m31100| 2015-07-09T13:57:39.444-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_7", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.444-0400 m31100| 2015-07-09T13:57:39.444-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.449-0400 m31100| 2015-07-09T13:57:39.449-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_7", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.449-0400 m31100| 2015-07-09T13:57:39.449-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.452-0400 m31100| 2015-07-09T13:57:39.452-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_7", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.453-0400 m31100| 2015-07-09T13:57:39.452-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.557-0400 m31100| 2015-07-09T13:57:39.556-0400 I INDEX [conn34] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.562-0400 m31100| 2015-07-09T13:57:39.561-0400 I COMMAND [conn72] command db26.reindex_background_4 command: listIndexes { listIndexes: "reindex_background_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 123156 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.563-0400 m31100| 2015-07-09T13:57:39.561-0400 I COMMAND [conn71] command db26.reindex_background_12 command: listIndexes { listIndexes: "reindex_background_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 122177 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 122ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.563-0400 m31100| 2015-07-09T13:57:39.562-0400 I COMMAND [conn46] command db26.reindex_background_6 command: listIndexes { listIndexes: "reindex_background_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 125801 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.563-0400 m31100| 2015-07-09T13:57:39.562-0400 I COMMAND [conn73] command db26.reindex_background_10 command: listIndexes { listIndexes: "reindex_background_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, 
Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 124659 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.564-0400 m31100| 2015-07-09T13:57:39.564-0400 I COMMAND [conn34] command db26.reindex_background_7 command: reIndex { reIndex: "reindex_background_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 159514 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 296ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.565-0400 m31100| 2015-07-09T13:57:39.565-0400 I QUERY [conn143] getmore db26.reindex_background_1 query: { $text: { $search: "ipsum" } } cursorid:2260507301532 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 400354 } }, Collection: { acquireCount: { r: 9 } } } 524ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.565-0400 m31100| 2015-07-09T13:57:39.565-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.566-0400 m31100| 2015-07-09T13:57:39.565-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.570-0400 m31100| 2015-07-09T13:57:39.569-0400 I QUERY [conn74] getmore db26.reindex_background_14 query: { $text: { $search: "ipsum" } } cursorid:2222601041024 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 406845 } }, Collection: { acquireCount: { r: 9 } } } 504ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.580-0400 m31100| 2015-07-09T13:57:39.579-0400 I INDEX [conn40] build index on: db26.reindex_background_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.580-0400 m31100| 2015-07-09T13:57:39.579-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.586-0400 m31100| 2015-07-09T13:57:39.586-0400 I INDEX [conn40] build index on: db26.reindex_background_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_9", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.586-0400 m31100| 2015-07-09T13:57:39.586-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.592-0400 m31100| 2015-07-09T13:57:39.591-0400 I INDEX [conn40] build index on: db26.reindex_background_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_9", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.592-0400 m31100| 2015-07-09T13:57:39.592-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.599-0400 m31100| 2015-07-09T13:57:39.598-0400 I INDEX [conn40] build 
index on: db26.reindex_background_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_9", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.599-0400 m31100| 2015-07-09T13:57:39.598-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.742-0400 m31100| 2015-07-09T13:57:39.741-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.744-0400 m31100| 2015-07-09T13:57:39.744-0400 I COMMAND [conn40] command db26.reindex_background_9 command: reIndex { reIndex: "reindex_background_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149082 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 319ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.750-0400 m31100| 2015-07-09T13:57:39.748-0400 I COMMAND [conn60] command db26.reindex_background_1 command: listIndexes { listIndexes: "reindex_background_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 171621 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.750-0400 m31100| 2015-07-09T13:57:39.749-0400 I COMMAND [conn72] command db26.reindex_background_14 command: listIndexes { listIndexes: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 164739 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 169ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.750-0400 m31100| 2015-07-09T13:57:39.749-0400 I QUERY [conn134] getmore db26.reindex_background_0 query: { $text: { $search: "ipsum" } } cursorid:2263796775505 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 565205 } }, Collection: { acquireCount: { r: 9 } } } 761ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.757-0400 m31100| 2015-07-09T13:57:39.757-0400 I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.757-0400 m31100| 2015-07-09T13:57:39.757-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.763-0400 m31100| 2015-07-09T13:57:39.762-0400 I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_6", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.763-0400 m31100| 2015-07-09T13:57:39.762-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.768-0400 m31100| 2015-07-09T13:57:39.767-0400 
I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_6", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.768-0400 m31100| 2015-07-09T13:57:39.768-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.772-0400 m31100| 2015-07-09T13:57:39.771-0400 I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_6", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.772-0400 m31100| 2015-07-09T13:57:39.771-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.877-0400 m31100| 2015-07-09T13:57:39.877-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.877-0400 m31100| 2015-07-09T13:57:39.877-0400 I COMMAND [conn35] command db26.reindex_background_6 command: reIndex { reIndex: "reindex_background_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 186304 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 311ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.884-0400 m31100| 2015-07-09T13:57:39.883-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.884-0400 m31100| 2015-07-09T13:57:39.883-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.890-0400 m31100| 2015-07-09T13:57:39.889-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_7", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.890-0400 m31100| 2015-07-09T13:57:39.890-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.895-0400 m31100| 2015-07-09T13:57:39.895-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_7", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.895-0400 m31100| 2015-07-09T13:57:39.895-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.901-0400 m31100| 2015-07-09T13:57:39.900-0400 I INDEX [conn34] build index on: db26.reindex_background_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_7", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:39.901-0400 m31100| 2015-07-09T13:57:39.900-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.003-0400 m31100| 2015-07-09T13:57:40.002-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.006-0400 m31100| 2015-07-09T13:57:40.006-0400 I COMMAND [conn34] command db26.reindex_background_7 command: reIndex { reIndex: "reindex_background_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 311581 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 440ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.029-0400 m31100| 2015-07-09T13:57:40.013-0400 I QUERY [conn135] getmore db26.reindex_background_5 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2268101847193 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 722448 } }, Collection: { acquireCount: { r: 8 } } } 605ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.029-0400 m31100| 2015-07-09T13:57:40.016-0400 I QUERY [conn142] getmore db26.reindex_background_11 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2239243572776 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 714566 } }, Collection: { acquireCount: { r: 8 } } } 608ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.030-0400 m31100| 2015-07-09T13:57:40.018-0400 I COMMAND [conn73] command db26.reindex_background_0 command: listIndexes { listIndexes: "reindex_background_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 250800 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 261ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.030-0400 m31100| 2015-07-09T13:57:40.018-0400 I QUERY [conn86] getmore db26.reindex_background_3 query: { $text: { $search: "ipsum" } } cursorid:2278061125225 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 824334 } }, Collection: { acquireCount: { r: 8 } } } 789ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.038-0400 m31100| 2015-07-09T13:57:40.024-0400 I QUERY [conn44] getmore db26.reindex_background_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2272781482643 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 433979 } }, Collection: { acquireCount: { r: 8 } } } 281ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.051-0400 m31100| 
2015-07-09T13:57:40.042-0400 I QUERY [conn139] getmore db26.reindex_background_2 query: { $text: { $search: "ipsum" } } cursorid:2235277863219 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 705976 } }, Collection: { acquireCount: { r: 8 } } } 632ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.051-0400 m31100| 2015-07-09T13:57:40.050-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.060-0400 m31100| 2015-07-09T13:57:40.059-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.060-0400 m31100| 2015-07-09T13:57:40.059-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.064-0400 m31100| 2015-07-09T13:57:40.063-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_0", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.064-0400 m31100| 2015-07-09T13:57:40.063-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.068-0400 m31100| 2015-07-09T13:57:40.068-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_0", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.069-0400 m31100| 2015-07-09T13:57:40.068-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.074-0400 m31100| 2015-07-09T13:57:40.074-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_0", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.074-0400 m31100| 2015-07-09T13:57:40.074-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.186-0400 m31100| 2015-07-09T13:57:40.186-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.189-0400 m31100| 2015-07-09T13:57:40.186-0400 I COMMAND [conn35] command db26.reindex_background_0 command: reIndex { reIndex: "reindex_background_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 3491 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.191-0400 m31100| 2015-07-09T13:57:40.190-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.192-0400 m31100| 2015-07-09T13:57:40.192-0400 I COMMAND [conn52] command db26.reindex_background_3 command: listIndexes { listIndexes: "reindex_background_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 136625 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.193-0400 m31100| 2015-07-09T13:57:40.192-0400 I QUERY [conn44] getmore db26.reindex_background_9 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2243926043655 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 136271 } }, Collection: { acquireCount: { r: 8 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.197-0400 m31100| 2015-07-09T13:57:40.196-0400 I COMMAND [conn72] command db26.reindex_background_2 command: listIndexes { listIndexes: "reindex_background_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 129913 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.198-0400 m31100| 2015-07-09T13:57:40.198-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.199-0400 m31100| 2015-07-09T13:57:40.198-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.208-0400 m31100| 2015-07-09T13:57:40.207-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.208-0400 m31100| 2015-07-09T13:57:40.207-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.212-0400 m31100| 2015-07-09T13:57:40.211-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_0", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.213-0400 m31100| 2015-07-09T13:57:40.211-0400 I INDEX 
[conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.217-0400 m31100| 2015-07-09T13:57:40.216-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_0", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.218-0400 m31100| 2015-07-09T13:57:40.217-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.222-0400 m31100| 2015-07-09T13:57:40.221-0400 I INDEX [conn35] build index on: db26.reindex_background_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_0", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.222-0400 m31100| 2015-07-09T13:57:40.221-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.325-0400 m31100| 2015-07-09T13:57:40.324-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.328-0400 m31100| 2015-07-09T13:57:40.325-0400 I COMMAND [conn35] command db26.reindex_background_0 command: reIndex { reIndex: "reindex_background_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 10353 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.328-0400 m31100| 2015-07-09T13:57:40.325-0400 I QUERY [conn140] getmore db26.reindex_background_10 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2252502957902 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 269644 } }, Collection: { acquireCount: { r: 8 } } } 282ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.337-0400 m31100| 2015-07-09T13:57:40.336-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.337-0400 m31100| 2015-07-09T13:57:40.337-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.342-0400 m31100| 2015-07-09T13:57:40.342-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_2", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.343-0400 m31100| 2015-07-09T13:57:40.342-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.347-0400 m31100| 2015-07-09T13:57:40.347-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_2", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 
2015-07-09T13:57:40.347-0400 m31100| 2015-07-09T13:57:40.347-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.350-0400 m31100| 2015-07-09T13:57:40.350-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_2", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.351-0400 m31100| 2015-07-09T13:57:40.350-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.449-0400 m31100| 2015-07-09T13:57:40.449-0400 I INDEX [conn132] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.450-0400 m31100| 2015-07-09T13:57:40.449-0400 I COMMAND [conn132] command db26.reindex_background_2 command: reIndex { reIndex: "reindex_background_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 133845 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 251ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.451-0400 m31100| 2015-07-09T13:57:40.450-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.456-0400 m31100| 2015-07-09T13:57:40.456-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.456-0400 m31100| 2015-07-09T13:57:40.456-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.461-0400 m31100| 2015-07-09T13:57:40.461-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_3", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.462-0400 m31100| 2015-07-09T13:57:40.461-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.467-0400 m31100| 2015-07-09T13:57:40.466-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_3", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.467-0400 m31100| 2015-07-09T13:57:40.466-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.471-0400 m31100| 2015-07-09T13:57:40.471-0400 I INDEX [conn34] build index on: db26.reindex_background_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_3", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.471-0400 m31100| 2015-07-09T13:57:40.471-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.596-0400 m31100| 2015-07-09T13:57:40.595-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.598-0400 m31100| 2015-07-09T13:57:40.596-0400 I COMMAND [conn34] command db26.reindex_background_3 command: reIndex { reIndex: "reindex_background_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 250920 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 397ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.605-0400 m30999| 2015-07-09T13:57:40.604-0400 I NETWORK [conn166] end connection 127.0.0.1:63193 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.608-0400 m31100| 2015-07-09T13:57:40.608-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.608-0400 m31100| 2015-07-09T13:57:40.608-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.613-0400 m31100| 2015-07-09T13:57:40.612-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_2", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.614-0400 m31100| 2015-07-09T13:57:40.613-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.619-0400 m31100| 2015-07-09T13:57:40.619-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_2", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.619-0400 m31100| 2015-07-09T13:57:40.619-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.623-0400 m31100| 2015-07-09T13:57:40.622-0400 I INDEX [conn132] build index on: db26.reindex_background_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_2", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.623-0400 m31100| 2015-07-09T13:57:40.622-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.721-0400 m31100| 2015-07-09T13:57:40.721-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.757-0400 m31100| 2015-07-09T13:57:40.721-0400 I COMMAND [conn132] command db26.reindex_background_2 command: reIndex { reIndex: "reindex_background_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 152317 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 270ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.758-0400 m31100| 2015-07-09T13:57:40.726-0400 I QUERY [conn73] query db26.reindex_background_6 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2248194276444 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 660264 } }, Collection: { acquireCount: { r: 9 } } } 537ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.758-0400 m31100| 2015-07-09T13:57:40.727-0400 I QUERY [conn71] query db26.reindex_background_12 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2281334151231 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 653428 } }, Collection: { acquireCount: { r: 9 } } } 537ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.758-0400 m31100| 2015-07-09T13:57:40.728-0400 I QUERY [conn60] query db26.reindex_background_13 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2273046797011 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 664936 } }, Collection: { acquireCount: { r: 9 } } } 541ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.759-0400 m31100| 2015-07-09T13:57:40.729-0400 I QUERY [conn48] query db26.reindex_background_8 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2231024050426 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 664790 } }, Collection: { acquireCount: { r: 9 } } } 540ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.759-0400 m31100| 2015-07-09T13:57:40.730-0400 I QUERY [conn86] getmore db26.reindex_background_1 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2260914486006 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 671808 } }, Collection: { acquireCount: { r: 8 } } } 681ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.759-0400 m31100| 
2015-07-09T13:57:40.732-0400 I QUERY [conn59] query db26.reindex_background_11 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2239483855487 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 665619 } }, Collection: { acquireCount: { r: 9 } } } 543ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.760-0400 m31100| 2015-07-09T13:57:40.733-0400 I QUERY [conn52] query db26.reindex_background_9 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2243476771110 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 517663 } }, Collection: { acquireCount: { r: 9 } } } 407ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.760-0400 m31100| 2015-07-09T13:57:40.737-0400 I QUERY [conn142] getmore db26.reindex_background_7 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2256741622713 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 660135 } }, Collection: { acquireCount: { r: 8 } } } 548ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.760-0400 m31100| 2015-07-09T13:57:40.738-0400 I QUERY [conn46] query db26.reindex_background_10 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2251702555743 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 397808 } }, Collection: { acquireCount: { r: 9 } } } 140ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.761-0400 m31100| 2015-07-09T13:57:40.738-0400 I QUERY [conn49] query db26.reindex_background_4 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2225124341700 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 662154 } }, Collection: { acquireCount: { r: 9 } } } 549ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.761-0400 m31100| 2015-07-09T13:57:40.744-0400 I QUERY [conn138] getmore db26.reindex_background_14 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2221742941073 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 677269 } }, Collection: { acquireCount: { r: 8 } } } 557ms
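
The entries above show the steady state of this workload: several connections (conn34, conn35, conn40, conn132) repeatedly rebuild the db26.reindex_background_* collections with reIndex while sibling threads issue listIndexes, $text, and $geoWithin reads against the same namespaces, and the Database W lock taken by each reIndex is what surfaces as the large timeAcquiringMicros waits on the surrounding read entries. The mongo-shell snippet below is a minimal sketch of that operation mix, reconstructed purely from these log lines rather than taken from the workload's actual source; the choice of collection and the assertion are illustrative assumptions.

// Sketch only -- reconstructed from the log entries above, not the actual
// FSM workload source. Collection choice and assertion are assumptions.
var coll = db.getSiblingDB('db26').reindex_background_0;

// Background index builds matching the "build index on:" lines
// (text_text, geo_2dsphere, integer_1); the _id_ index already exists.
coll.createIndex({ text: 'text' }, { background: true });
coll.createIndex({ geo: '2dsphere' }, { background: true });
coll.createIndex({ integer: 1 }, { background: true });

// "CMD: reIndex db26.reindex_background_*" -- rebuilds all indexes under a
// Database W lock (see the lock sections of the reIndex COMMAND entries).
assert.commandWorked(coll.reIndex());

// The interleaved reads: a text search and a $geoWithin polygon query,
// using the literal predicates from the QUERY/getmore entries.
var nText = coll.find({ $text: { $search: 'ipsum' } }).itcount();
var nGeo = coll.find({
    geo: {
        $geoWithin: {
            $geometry: {
                type: 'Polygon',
                coordinates: [[[-26, -26], [-26, 26], [26, 26], [26, -26], [-26, -26]]]
            }
        }
    }
}).itcount();
print('text matches: ' + nText + ', geo matches: ' + nGeo);

Run sequentially like this the sequence completes without contention; the 100ms-plus acquire waits in the trace come from many connections executing the same mix concurrently, so even plain listIndexes calls queue behind an in-flight reIndex.
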
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.810-0400 m31100| 2015-07-09T13:57:40.805-0400 I QUERY [conn135] getmore db26.reindex_background_5 query: { $text: { $search: "ipsum" } } cursorid:2269397123686 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 655442 } }, Collection: { acquireCount: { r: 8 } } } 615ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.832-0400 m31100| 2015-07-09T13:57:40.831-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.847-0400 m31100| 2015-07-09T13:57:40.846-0400 I INDEX [conn34] build index on: db26.reindex_background_5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.847-0400 m31100| 2015-07-09T13:57:40.846-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.853-0400 m31100| 2015-07-09T13:57:40.853-0400 I INDEX [conn34] build index on: db26.reindex_background_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_5", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.853-0400 m31100| 2015-07-09T13:57:40.853-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.857-0400 m31100| 2015-07-09T13:57:40.856-0400 I INDEX [conn34] build index on: db26.reindex_background_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_5", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.857-0400 m31100| 2015-07-09T13:57:40.856-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.863-0400 m31100| 2015-07-09T13:57:40.862-0400 I INDEX [conn34] build index on: db26.reindex_background_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_5", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.863-0400 m31100| 2015-07-09T13:57:40.863-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.968-0400 m31100| 2015-07-09T13:57:40.967-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.973-0400 m31100| 2015-07-09T13:57:40.968-0400 I COMMAND [conn34] command db26.reindex_background_5 command: reIndex { reIndex: "reindex_background_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 9081 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:40.974-0400 m31100| 2015-07-09T13:57:40.972-0400 I QUERY [conn48] query db26.reindex_background_2 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2234062374003 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 135898 } }, Collection: { acquireCount: { r: 9 } } } 143ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.010-0400 m31100| 2015-07-09T13:57:41.007-0400 I QUERY [conn44] getmore db26.reindex_background_13 query: { $text: { $search: "ipsum" } } cursorid:2273046797011 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 157920 } }, Collection: { acquireCount: { r: 9 } } } 250ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.010-0400 m31100| 2015-07-09T13:57:41.009-0400 I QUERY [conn137] getmore db26.reindex_background_12 query: { $text: { $search: "ipsum" } } cursorid:2281334151231 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 147635 } }, Collection: { acquireCount: { r: 8 } } } 236ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.011-0400 m31100| 2015-07-09T13:57:41.011-0400 I QUERY [conn134] getmore db26.reindex_background_8 query: { $text: { $search: "ipsum" } } cursorid:2231024050426 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 137358 } }, Collection: { acquireCount: { r: 8 } } } 246ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.019-0400 m31100| 2015-07-09T13:57:41.018-0400 I QUERY [conn86] getmore db26.reindex_background_9 query: { $text: { $search: "ipsum" } } cursorid:2243476771110 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 132135 } }, Collection: { acquireCount: { r: 9 } } } 260ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.019-0400 m31100| 2015-07-09T13:57:41.019-0400 I QUERY [conn143] getmore db26.reindex_background_7 query: { $text: { $search: "ipsum" } } cursorid:2255656690395 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { 
r: 1 }, timeAcquiringMicros: { r: 132051 } }, Collection: { acquireCount: { r: 8 } } } 237ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.021-0400 m31100| 2015-07-09T13:57:41.020-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.030-0400 m31100| 2015-07-09T13:57:41.030-0400 I INDEX [conn132] build index on: db26.reindex_background_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.031-0400 m31100| 2015-07-09T13:57:41.030-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.035-0400 m31100| 2015-07-09T13:57:41.034-0400 I INDEX [conn132] build index on: db26.reindex_background_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_8", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.036-0400 m31100| 2015-07-09T13:57:41.035-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.040-0400 m31100| 2015-07-09T13:57:41.040-0400 I INDEX [conn132] build index on: db26.reindex_background_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_8", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.040-0400 m31100| 2015-07-09T13:57:41.040-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.043-0400 m31100| 2015-07-09T13:57:41.043-0400 I INDEX [conn132] build index on: db26.reindex_background_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_8", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.043-0400 m31100| 2015-07-09T13:57:41.043-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.158-0400 m31100| 2015-07-09T13:57:41.157-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.165-0400 m31100| 2015-07-09T13:57:41.165-0400 I COMMAND [conn132] command db26.reindex_background_8 command: reIndex { reIndex: "reindex_background_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 4656 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.171-0400 m31100| 2015-07-09T13:57:41.171-0400 I COMMAND [conn52] command db26.reindex_background_9 command: listIndexes { listIndexes: "reindex_background_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 138725 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.172-0400 m31100| 2015-07-09T13:57:41.171-0400 I COMMAND [conn48] command db26.reindex_background_12 command: listIndexes { listIndexes: "reindex_background_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 138964 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.180-0400 m30998| 2015-07-09T13:57:41.180-0400 I NETWORK [conn161] end connection 127.0.0.1:63186 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.181-0400 m31100| 2015-07-09T13:57:41.178-0400 I QUERY [conn74] getmore db26.reindex_background_14 query: { $text: { $search: "ipsum" } } cursorid:2221173197356 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 265570 } }, Collection: { acquireCount: { r: 9 } } } 378ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.181-0400 m31100| 2015-07-09T13:57:41.179-0400 I COMMAND [conn34] CMD: reIndex db26.reindex_background_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.181-0400 m31100| 2015-07-09T13:57:41.179-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.182-0400 m31100| 2015-07-09T13:57:41.181-0400 I QUERY [conn42] getmore db26.reindex_background_4 query: { $text: { $search: "ipsum" } } cursorid:2225124341700 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 274791 } }, Collection: { acquireCount: { r: 9 } } } 434ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.182-0400 m31100| 2015-07-09T13:57:41.182-0400 I QUERY [conn140] getmore db26.reindex_background_6 query: { $text: { $search: "ipsum" } } cursorid:2248194276444 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 298834 } }, Collection: { acquireCount: { r: 10 } } 
} 441ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.183-0400 m31100| 2015-07-09T13:57:41.182-0400 I COMMAND [conn59] command db26.reindex_background_7 command: listIndexes { listIndexes: "reindex_background_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 136937 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.187-0400 m31100| 2015-07-09T13:57:41.187-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.188-0400 m31100| 2015-07-09T13:57:41.188-0400 I QUERY [conn138] getmore db26.reindex_background_0 query: { $text: { $search: "ipsum" } } cursorid:2264550979483 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 273359 } }, Collection: { acquireCount: { r: 8 } } } 365ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.198-0400 m31100| 2015-07-09T13:57:41.198-0400 I INDEX [conn34] build index on: db26.reindex_background_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.199-0400 m31100| 2015-07-09T13:57:41.198-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.204-0400 m31100| 2015-07-09T13:57:41.203-0400 I INDEX [conn34] build index on: db26.reindex_background_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_9", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.204-0400 m31100| 2015-07-09T13:57:41.203-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.208-0400 m31100| 2015-07-09T13:57:41.208-0400 I INDEX [conn34] build index on: db26.reindex_background_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_9", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.208-0400 m31100| 2015-07-09T13:57:41.208-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.215-0400 m31100| 2015-07-09T13:57:41.214-0400 I INDEX [conn34] build index on: db26.reindex_background_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_9", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.215-0400 m31100| 2015-07-09T13:57:41.214-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.339-0400 m31100| 2015-07-09T13:57:41.339-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.339-0400 m31100| 2015-07-09T13:57:41.339-0400 I COMMAND [conn34] command db26.reindex_background_9 command: reIndex { reIndex: "reindex_background_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 10129 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.348-0400 m31100| 2015-07-09T13:57:41.348-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.349-0400 m31100| 2015-07-09T13:57:41.348-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.353-0400 m31100| 2015-07-09T13:57:41.352-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_12", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.353-0400 m31100| 2015-07-09T13:57:41.352-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.359-0400 m31100| 2015-07-09T13:57:41.358-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_12", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.359-0400 m31100| 2015-07-09T13:57:41.359-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.362-0400 m31100| 2015-07-09T13:57:41.362-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_12", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.362-0400 m31100| 2015-07-09T13:57:41.362-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.468-0400 m31100| 2015-07-09T13:57:41.468-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.472-0400 m31100| 2015-07-09T13:57:41.469-0400 I COMMAND [conn132] command db26.reindex_background_12 command: reIndex { reIndex: "reindex_background_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 156557 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 289ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.473-0400 m31100| 2015-07-09T13:57:41.471-0400 I COMMAND [conn72] command db26.reindex_background_4 command: listIndexes { listIndexes: "reindex_background_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 278241 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 278ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.474-0400 m31100| 2015-07-09T13:57:41.473-0400 I COMMAND [conn48] command db26.reindex_background_14 command: listIndexes { listIndexes: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 284622 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 286ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.474-0400 m31100| 2015-07-09T13:57:41.473-0400 I QUERY [conn136] getmore db26.reindex_background_2 query: { $text: { $search: "ipsum" } } cursorid:2234062374003 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 417689 } }, Collection: { acquireCount: { r: 8 } } } 496ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.476-0400 m31100| 2015-07-09T13:57:41.476-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.477-0400 m31100| 2015-07-09T13:57:41.477-0400 I COMMAND [conn49] command db26.reindex_background_6 command: listIndexes { listIndexes: "reindex_background_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 283298 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 283ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.479-0400 m31100| 2015-07-09T13:57:41.478-0400 I QUERY [conn139] getmore db26.reindex_background_10 query: { $text: { $search: "ipsum" } } cursorid:2251702555743 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 557571 } }, Collection: { acquireCount: { r: 10 } } } 723ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.479-0400 m31100| 2015-07-09T13:57:41.478-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.481-0400 m31100| 2015-07-09T13:57:41.480-0400 I COMMAND [conn71] command db26.reindex_background_0 command: 
listIndexes { listIndexes: "reindex_background_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 284636 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 285ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.483-0400 m31100| 2015-07-09T13:57:41.482-0400 I QUERY [conn142] getmore db26.reindex_background_11 query: { $text: { $search: "ipsum" } } cursorid:2239483855487 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 561338 } }, Collection: { acquireCount: { r: 9 } } } 738ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.490-0400 m31100| 2015-07-09T13:57:41.489-0400 I INDEX [conn40] build index on: db26.reindex_background_7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.490-0400 m31100| 2015-07-09T13:57:41.489-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.494-0400 m31100| 2015-07-09T13:57:41.493-0400 I INDEX [conn40] build index on: db26.reindex_background_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_7", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.494-0400 m31100| 2015-07-09T13:57:41.494-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.499-0400 m31100| 2015-07-09T13:57:41.499-0400 I INDEX [conn40] build index on: db26.reindex_background_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_7", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.500-0400 m31100| 2015-07-09T13:57:41.499-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.506-0400 m31100| 2015-07-09T13:57:41.506-0400 I INDEX [conn40] build index on: db26.reindex_background_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_7", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.506-0400 m31100| 2015-07-09T13:57:41.506-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.641-0400 m31100| 2015-07-09T13:57:41.640-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.645-0400 m31100| 2015-07-09T13:57:41.642-0400 I COMMAND [conn40] command db26.reindex_background_7 command: reIndex { reIndex: "reindex_background_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 295030 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 455ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.646-0400 m31100| 2015-07-09T13:57:41.643-0400 I COMMAND [conn72] command db26.reindex_background_2 command: listIndexes { listIndexes: "reindex_background_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 160058 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.646-0400 m31100| 2015-07-09T13:57:41.644-0400 I COMMAND [conn46] command db26.reindex_background_10 command: listIndexes { listIndexes: "reindex_background_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 154432 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 156ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.647-0400 m31100| 2015-07-09T13:57:41.644-0400 I COMMAND [conn52] command db26.reindex_background_11 command: listIndexes { listIndexes: "reindex_background_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 153001 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.656-0400 m31100| 2015-07-09T13:57:41.655-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.656-0400 m31100| 2015-07-09T13:57:41.655-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.661-0400 m31100| 2015-07-09T13:57:41.660-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_12", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.661-0400 m31100| 2015-07-09T13:57:41.660-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.662-0400 m30999| 2015-07-09T13:57:41.660-0400 I NETWORK [conn164] end connection 127.0.0.1:63191 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.666-0400 m31100| 2015-07-09T13:57:41.665-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_12", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.666-0400 m31100| 
2015-07-09T13:57:41.666-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.671-0400 m31100| 2015-07-09T13:57:41.671-0400 I INDEX [conn132] build index on: db26.reindex_background_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_12", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.672-0400 m31100| 2015-07-09T13:57:41.671-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.780-0400 m31100| 2015-07-09T13:57:41.779-0400 I INDEX [conn132] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.780-0400 m31100| 2015-07-09T13:57:41.780-0400 I COMMAND [conn132] command db26.reindex_background_12 command: reIndex { reIndex: "reindex_background_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 172772 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 303ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.787-0400 m31100| 2015-07-09T13:57:41.786-0400 I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.787-0400 m31100| 2015-07-09T13:57:41.786-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.792-0400 m31100| 2015-07-09T13:57:41.791-0400 I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_6", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.792-0400 m31100| 2015-07-09T13:57:41.791-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.797-0400 m31100| 2015-07-09T13:57:41.796-0400 I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_6", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.797-0400 m31100| 2015-07-09T13:57:41.797-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.803-0400 m31100| 2015-07-09T13:57:41.802-0400 I INDEX [conn35] build index on: db26.reindex_background_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_6", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.803-0400 m31100| 2015-07-09T13:57:41.802-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.915-0400 m31100| 2015-07-09T13:57:41.914-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.938-0400 m31100| 2015-07-09T13:57:41.915-0400 I COMMAND [conn35] command db26.reindex_background_6 command: reIndex { reIndex: "reindex_background_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 301409 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 436ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.939-0400 m31100| 2015-07-09T13:57:41.924-0400 I QUERY [conn143] getmore db26.reindex_background_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2273511678948 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 435724 } }, Collection: { acquireCount: { r: 8 } } } 282ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:41.955-0400 m31100| 2015-07-09T13:57:41.953-0400 I QUERY [conn135] getmore db26.reindex_background_1 query: { $text: { $search: "ipsum" } } cursorid:2259252732339 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 1003998 } }, Collection: { acquireCount: { r: 8 } } } 982ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.000-0400 m31100| 2015-07-09T13:57:41.994-0400 I QUERY [conn44] getmore db26.reindex_background_5 query: { $text: { $search: "ipsum" } } cursorid:2268715910562 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 866420 } }, Collection: { acquireCount: { r: 8 } } } 835ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.049-0400 m30999| 2015-07-09T13:57:42.049-0400 I NETWORK [conn160] end connection 127.0.0.1:63184 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.108-0400 m31100| 2015-07-09T13:57:42.108-0400 I QUERY [conn136] getmore db26.reindex_background_0 query: { $text: { $search: "ipsum" } } cursorid:2263875545650 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.154-0400 m30998| 2015-07-09T13:57:42.153-0400 I NETWORK [conn164] end connection 127.0.0.1:63194 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.219-0400 m31100| 2015-07-09T13:57:42.192-0400 I QUERY [conn44] getmore db26.reindex_background_13 query: { $text: { $search: "ipsum" } } cursorid:2273730536666 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 191ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.219-0400 m31100| 2015-07-09T13:57:42.199-0400 I QUERY 
[conn143] getmore db26.reindex_background_5 query: { $text: { $search: "ipsum" } } cursorid:2269605494286 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 106ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.220-0400 m31100| 2015-07-09T13:57:42.204-0400 I QUERY [conn42] getmore db26.reindex_background_4 query: { $text: { $search: "ipsum" } } cursorid:2226536688111 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.228-0400 m31100| 2015-07-09T13:57:42.213-0400 I QUERY [conn135] getmore db26.reindex_background_7 query: { $text: { $search: "ipsum" } } cursorid:2256859376891 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.229-0400 m31100| 2015-07-09T13:57:42.215-0400 I QUERY [conn139] getmore db26.reindex_background_10 query: { $text: { $search: "ipsum" } } cursorid:2251021266455 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.241-0400 m31100| 2015-07-09T13:57:42.240-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.243-0400 m31100| 2015-07-09T13:57:42.243-0400 I QUERY [conn74] getmore db26.reindex_background_14 query: { $text: { $search: "ipsum" } } cursorid:2222392501475 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 215ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.244-0400 m31100| 2015-07-09T13:57:42.243-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.256-0400 m31100| 2015-07-09T13:57:42.256-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.258-0400 m31100| 2015-07-09T13:57:42.256-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.258-0400 m30998| 2015-07-09T13:57:42.258-0400 I NETWORK [conn159] end connection 127.0.0.1:63182 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.264-0400 m31100| 2015-07-09T13:57:42.263-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_10", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.264-0400 m31100| 2015-07-09T13:57:42.264-0400 I INDEX [conn35] 
building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.268-0400 m31100| 2015-07-09T13:57:42.267-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_10", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.268-0400 m31100| 2015-07-09T13:57:42.268-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.271-0400 m31100| 2015-07-09T13:57:42.271-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_10", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.271-0400 m31100| 2015-07-09T13:57:42.271-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.373-0400 m31100| 2015-07-09T13:57:42.373-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.376-0400 m31100| 2015-07-09T13:57:42.374-0400 I COMMAND [conn35] command db26.reindex_background_10 command: reIndex { reIndex: "reindex_background_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 8365 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.377-0400 m31100| 2015-07-09T13:57:42.374-0400 I COMMAND [conn60] command db26.reindex_background_7 command: listIndexes { listIndexes: "reindex_background_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 127459 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.377-0400 m31100| 2015-07-09T13:57:42.375-0400 I COMMAND [conn49] command db26.reindex_background_14 command: listIndexes { listIndexes: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 124568 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.378-0400 m31100| 2015-07-09T13:57:42.376-0400 I QUERY [conn134] getmore db26.reindex_background_12 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2281761586327 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 127644 } }, Collection: { acquireCount: { r: 8 } } } 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.386-0400 m31100| 2015-07-09T13:57:42.384-0400 I QUERY [conn142] getmore db26.reindex_background_9 query: { $text: { $search: "ipsum" } } cursorid:2244016970539 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 
numYields:10 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 125120 } }, Collection: { acquireCount: { r: 11 } } } 390ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.392-0400 m30999| 2015-07-09T13:57:42.391-0400 I NETWORK [conn161] end connection 127.0.0.1:63185 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.399-0400 m31100| 2015-07-09T13:57:42.399-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.399-0400 m31100| 2015-07-09T13:57:42.399-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.403-0400 m31100| 2015-07-09T13:57:42.403-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_5", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.403-0400 m31100| 2015-07-09T13:57:42.403-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.408-0400 m31100| 2015-07-09T13:57:42.408-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_5", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.408-0400 m31100| 2015-07-09T13:57:42.408-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.413-0400 m31100| 2015-07-09T13:57:42.412-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_5", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.413-0400 m31100| 2015-07-09T13:57:42.413-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.518-0400 m31100| 2015-07-09T13:57:42.517-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.518-0400 m31100| 2015-07-09T13:57:42.518-0400 I COMMAND [conn40] command db26.reindex_background_5 command: reIndex { reIndex: "reindex_background_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 147383 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 274ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.520-0400 m31100| 2015-07-09T13:57:42.520-0400 I COMMAND [conn60] command db26.reindex_background_9 command: listIndexes { listIndexes: "reindex_background_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:732 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 128449 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.520-0400 m31100| 2015-07-09T13:57:42.520-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.531-0400 m31100| 2015-07-09T13:57:42.530-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.531-0400 m31100| 2015-07-09T13:57:42.530-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.537-0400 m31100| 2015-07-09T13:57:42.536-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_5", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.537-0400 m31100| 2015-07-09T13:57:42.537-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.539-0400 m30999| 2015-07-09T13:57:42.539-0400 I NETWORK [conn162] end connection 127.0.0.1:63188 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.543-0400 m31100| 2015-07-09T13:57:42.542-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_5", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.543-0400 m31100| 2015-07-09T13:57:42.543-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.546-0400 m31100| 2015-07-09T13:57:42.546-0400 I INDEX [conn40] build index on: db26.reindex_background_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_5", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.547-0400 m31100| 2015-07-09T13:57:42.546-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.648-0400 m31100| 2015-07-09T13:57:42.647-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.649-0400 m31100| 2015-07-09T13:57:42.649-0400 I COMMAND [conn40] command db26.reindex_background_5 command: reIndex { reIndex: "reindex_background_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:671 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 5160 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.660-0400 m31100| 2015-07-09T13:57:42.659-0400 I QUERY [conn73] query db26.reindex_background_12 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2282548955578 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 249945 } }, Collection: { acquireCount: { r: 9 } } } 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.677-0400 m30999| 2015-07-09T13:57:42.676-0400 I NETWORK [conn165] end connection 127.0.0.1:63192 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.686-0400 m31100| 2015-07-09T13:57:42.683-0400 I QUERY [conn140] getmore db26.reindex_background_6 query: { $text: { $search: "ipsum" } } cursorid:2248242093050 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 391484 } }, Collection: { acquireCount: { r: 8 } } } 471ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.690-0400 m31100| 2015-07-09T13:57:42.689-0400 I QUERY [conn135] getmore db26.reindex_background_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:2272555862903 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 399346 } }, Collection: { acquireCount: { r: 8 } } } 315ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.718-0400 m31100| 2015-07-09T13:57:42.718-0400 I QUERY [conn74] getmore db26.reindex_background_2 query: { $text: { $search: "ipsum" } } cursorid:2235031511076 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 383729 } }, Collection: { acquireCount: { r: 8 } } } 339ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.727-0400 m30998| 2015-07-09T13:57:42.726-0400 I NETWORK [conn163] end connection 127.0.0.1:63190 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.772-0400 m30998| 2015-07-09T13:57:42.771-0400 I NETWORK [conn160] end connection 127.0.0.1:63183 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.779-0400 m31100| 2015-07-09T13:57:42.779-0400 I COMMAND [conn40] CMD: reIndex db26.reindex_background_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.781-0400 m30998| 
2015-07-09T13:57:42.781-0400 I NETWORK [conn165] end connection 127.0.0.1:63195 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.787-0400 m31100| 2015-07-09T13:57:42.787-0400 I INDEX [conn40] build index on: db26.reindex_background_13 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.787-0400 m31100| 2015-07-09T13:57:42.787-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.792-0400 m31100| 2015-07-09T13:57:42.791-0400 I INDEX [conn40] build index on: db26.reindex_background_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_13", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.792-0400 m31100| 2015-07-09T13:57:42.791-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.796-0400 m31100| 2015-07-09T13:57:42.796-0400 I INDEX [conn40] build index on: db26.reindex_background_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_13", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.796-0400 m31100| 2015-07-09T13:57:42.796-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.800-0400 m31100| 2015-07-09T13:57:42.800-0400 I INDEX [conn40] build index on: db26.reindex_background_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_13", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.800-0400 m31100| 2015-07-09T13:57:42.800-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.902-0400 m31100| 2015-07-09T13:57:42.901-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.902-0400 m31100| 2015-07-09T13:57:42.902-0400 I COMMAND [conn40] command db26.reindex_background_13 command: reIndex { reIndex: "reindex_background_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 540 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 122ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.908-0400 m31100| 2015-07-09T13:57:42.907-0400 I QUERY [conn139] getmore db26.reindex_background_10 query: { $text: { $search: "ipsum" } } cursorid:2251672130979 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 121983 } }, Collection: { acquireCount: { r: 8 } } } 187ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.915-0400 m31100| 2015-07-09T13:57:42.914-0400 I QUERY [conn74] getmore db26.reindex_background_14 query: { $text: { $search: "ipsum" } } cursorid:2222372381505 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 122147 } }, Collection: { acquireCount: { r: 8 } } } 186ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.919-0400 m31100| 2015-07-09T13:57:42.919-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.925-0400 m31100| 2015-07-09T13:57:42.924-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.925-0400 m31100| 2015-07-09T13:57:42.924-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.932-0400 m31100| 2015-07-09T13:57:42.931-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_10", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.932-0400 m31100| 2015-07-09T13:57:42.932-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.936-0400 m31100| 2015-07-09T13:57:42.935-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_10", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.937-0400 m31100| 2015-07-09T13:57:42.935-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.941-0400 m31100| 2015-07-09T13:57:42.941-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_10", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:42.941-0400 m31100| 2015-07-09T13:57:42.941-0400 I INDEX [conn35] building index using bulk method 
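The INDEX and COMMAND entries above record the reindex_background workload repeatedly rebuilding the same four indexes on each collection. A minimal shell sketch of those operations, assuming a mongo shell session against this cluster; "reindex_background_10" stands in for the per-thread collections, and the field names come from the logged index properties:

    // Background builds matching the text_text, geo_2dsphere and integer_1
    // indexes reported by conn35/conn40 above (the _id_ index always exists).
    var coll = db.getSiblingDB("db26").reindex_background_10;
    coll.createIndex({ text: "text" }, { background: true });
    coll.createIndex({ geo: "2dsphere" }, { background: true });
    coll.createIndex({ integer: 1 }, { background: true });
    // reIndex rebuilds every index with the bulk method while holding the
    // database W lock, which is why the concurrent listIndexes commands
    // above log long timeAcquiringMicros waits before their r locks.
    coll.reIndex();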
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.046-0400 m31100| 2015-07-09T13:57:43.045-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.047-0400 m31100| 2015-07-09T13:57:43.046-0400 I COMMAND [conn35] command db26.reindex_background_10 command: reIndex { reIndex: "reindex_background_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.047-0400 m31100| 2015-07-09T13:57:43.046-0400 I COMMAND [conn49] command db26.reindex_background_14 command: listIndexes { listIndexes: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:737 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 123013 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.047-0400 m31100| 2015-07-09T13:57:43.047-0400 I COMMAND [conn35] CMD: reIndex db26.reindex_background_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.048-0400 m31100| 2015-07-09T13:57:43.048-0400 I COMMAND [conn132] CMD: reIndex db26.reindex_background_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.054-0400 m31100| 2015-07-09T13:57:43.053-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.054-0400 m31100| 2015-07-09T13:57:43.053-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.060-0400 m31100| 2015-07-09T13:57:43.059-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_10", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.060-0400 m31100| 2015-07-09T13:57:43.059-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.063-0400 m31100| 2015-07-09T13:57:43.063-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_10", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.064-0400 m31100| 2015-07-09T13:57:43.063-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.068-0400 m31100| 2015-07-09T13:57:43.067-0400 I INDEX [conn35] build index on: db26.reindex_background_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_10", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.068-0400 m31100| 2015-07-09T13:57:43.067-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.187-0400 m31100| 2015-07-09T13:57:43.186-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.187-0400 m31100| 2015-07-09T13:57:43.187-0400 I COMMAND [conn35] command db26.reindex_background_10 command: reIndex { reIndex: "reindex_background_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 406 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.195-0400 m31100| 2015-07-09T13:57:43.195-0400 I INDEX [conn132] build index on: db26.reindex_background_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db26.reindex_background_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.195-0400 m31100| 2015-07-09T13:57:43.195-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.201-0400 m31100| 2015-07-09T13:57:43.200-0400 I INDEX [conn132] build index on: db26.reindex_background_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db26.reindex_background_14", background: true, weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.201-0400 m31100| 2015-07-09T13:57:43.200-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.205-0400 m31100| 2015-07-09T13:57:43.204-0400 I INDEX [conn132] build index on: db26.reindex_background_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db26.reindex_background_14", background: true, 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.205-0400 m31100| 2015-07-09T13:57:43.205-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.212-0400 m31100| 2015-07-09T13:57:43.211-0400 I INDEX [conn132] build index on: db26.reindex_background_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db26.reindex_background_14", background: true } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.212-0400 m31100| 2015-07-09T13:57:43.211-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.214-0400 m30998| 2015-07-09T13:57:43.214-0400 I NETWORK [conn166] end connection 127.0.0.1:63196 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.311-0400 m31100| 2015-07-09T13:57:43.311-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.312-0400 m31100| 2015-07-09T13:57:43.312-0400 I COMMAND [conn132] command db26.reindex_background_14 command: reIndex { reIndex: "reindex_background_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:675 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 139509 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 263ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.319-0400 m31100| 2015-07-09T13:57:43.318-0400 I QUERY [conn60] query db26.reindex_background_13 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2272823739418 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 383649 } }, Collection: { acquireCount: { r: 9 } } } 271ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.332-0400 m30998| 2015-07-09T13:57:43.331-0400 I NETWORK [conn162] end connection 127.0.0.1:63187 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.385-0400 m30999| 2015-07-09T13:57:43.384-0400 I NETWORK [conn163] end connection 127.0.0.1:63189 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.406-0400 m30999| 2015-07-09T13:57:43.406-0400 I COMMAND [conn1] DROP: db26.reindex_background_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.406-0400 m30999| 2015-07-09T13:57:43.406-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.406-0400 m31100| 2015-07-09T13:57:43.406-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.410-0400 m30999| 2015-07-09T13:57:43.410-0400 I COMMAND [conn1] DROP: db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.410-0400 m30999| 2015-07-09T13:57:43.410-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.411-0400 m31102| 2015-07-09T13:57:43.410-0400 I COMMAND [repl writer worker 14] CMD: drop db26.reindex_background_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.411-0400 m31100| 2015-07-09T13:57:43.410-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.411-0400 m31101| 2015-07-09T13:57:43.410-0400 I COMMAND [repl writer worker 1] CMD: drop db26.reindex_background_0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.414-0400 m30999| 2015-07-09T13:57:43.414-0400 I COMMAND [conn1] DROP: db26.reindex_background_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.415-0400 m30999| 2015-07-09T13:57:43.414-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.415-0400 m31100| 2015-07-09T13:57:43.414-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.416-0400 m31102| 2015-07-09T13:57:43.416-0400 I COMMAND [repl writer worker 6] CMD: drop db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.418-0400 m30999| 2015-07-09T13:57:43.417-0400 I COMMAND [conn1] DROP: db26.reindex_background_11 
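The QUERY and getmore entries above show the two read shapes the workload issues while the rebuilds run. A sketch of both, assuming the same db26 collections; the search term and polygon are copied from the logged query shapes:

    var coll = db.getSiblingDB("db26").reindex_background_13;
    // Text search over the text_text index: the initial query returns a
    // 101-document first batch (nreturned:101 above) and a getmore drains
    // the remaining 899 of the 1000 records.
    coll.find({ $text: { $search: "ipsum" } }).itcount();
    // Geo query over the geo_2dsphere index, using the logged polygon.
    coll.find({ geo: { $geoWithin: { $geometry: {
        type: "Polygon",
        coordinates: [ [ [ -26, -26 ], [ -26, 26 ], [ 26, 26 ],
                         [ 26, -26 ], [ -26, -26 ] ] ]
    } } } }).itcount();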
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.418-0400 m30999| 2015-07-09T13:57:43.417-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.418-0400 m31100| 2015-07-09T13:57:43.418-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.420-0400 m31101| 2015-07-09T13:57:43.419-0400 I COMMAND [repl writer worker 5] CMD: drop db26.reindex_background_1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.421-0400 m30999| 2015-07-09T13:57:43.421-0400 I COMMAND [conn1] DROP: db26.reindex_background_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.421-0400 m30999| 2015-07-09T13:57:43.421-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.422-0400 m31100| 2015-07-09T13:57:43.421-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.422-0400 m31102| 2015-07-09T13:57:43.421-0400 I COMMAND [repl writer worker 12] CMD: drop db26.reindex_background_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.423-0400 m31101| 2015-07-09T13:57:43.423-0400 I COMMAND [repl writer worker 12] CMD: drop db26.reindex_background_10 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.424-0400 m31102| 2015-07-09T13:57:43.424-0400 I COMMAND [repl writer worker 2] CMD: drop db26.reindex_background_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.425-0400 m30999| 2015-07-09T13:57:43.425-0400 I COMMAND [conn1] DROP: db26.reindex_background_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.425-0400 m30999| 2015-07-09T13:57:43.425-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.425-0400 m31100| 2015-07-09T13:57:43.425-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.426-0400 m31101| 2015-07-09T13:57:43.426-0400 I COMMAND [repl writer worker 10] CMD: drop db26.reindex_background_11 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.427-0400 m31102| 2015-07-09T13:57:43.427-0400 I COMMAND [repl writer worker 9] CMD: drop db26.reindex_background_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.429-0400 m31101| 2015-07-09T13:57:43.428-0400 I COMMAND [repl writer worker 2] CMD: drop db26.reindex_background_12 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.429-0400 m30999| 2015-07-09T13:57:43.429-0400 I COMMAND [conn1] DROP: db26.reindex_background_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.429-0400 m30999| 2015-07-09T13:57:43.429-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.430-0400 m31100| 2015-07-09T13:57:43.429-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.431-0400 m31102| 2015-07-09T13:57:43.431-0400 I COMMAND [repl writer worker 0] CMD: drop db26.reindex_background_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.431-0400 m30999| 2015-07-09T13:57:43.431-0400 I COMMAND [conn1] DROP: db26.reindex_background_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.432-0400 m30999| 2015-07-09T13:57:43.431-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.432-0400 m31101| 2015-07-09T13:57:43.431-0400 I COMMAND [repl writer worker 7] CMD: drop 
db26.reindex_background_13 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.432-0400 m31100| 2015-07-09T13:57:43.432-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.434-0400 m31102| 2015-07-09T13:57:43.434-0400 I COMMAND [repl writer worker 3] CMD: drop db26.reindex_background_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.434-0400 m31101| 2015-07-09T13:57:43.434-0400 I COMMAND [repl writer worker 0] CMD: drop db26.reindex_background_14 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.435-0400 m30999| 2015-07-09T13:57:43.435-0400 I COMMAND [conn1] DROP: db26.reindex_background_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.435-0400 m30999| 2015-07-09T13:57:43.435-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.436-0400 m31100| 2015-07-09T13:57:43.435-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.437-0400 m31101| 2015-07-09T13:57:43.436-0400 I COMMAND [repl writer worker 9] CMD: drop db26.reindex_background_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.437-0400 m31102| 2015-07-09T13:57:43.437-0400 I COMMAND [repl writer worker 7] CMD: drop db26.reindex_background_2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.439-0400 m30999| 2015-07-09T13:57:43.439-0400 I COMMAND [conn1] DROP: db26.reindex_background_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.439-0400 m30999| 2015-07-09T13:57:43.439-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.439-0400 m31100| 2015-07-09T13:57:43.439-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.440-0400 m31101| 2015-07-09T13:57:43.439-0400 I COMMAND [repl writer worker 13] CMD: drop db26.reindex_background_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.441-0400 m31102| 2015-07-09T13:57:43.441-0400 I COMMAND [repl writer worker 11] CMD: drop db26.reindex_background_3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.442-0400 m30999| 2015-07-09T13:57:43.442-0400 I COMMAND [conn1] DROP: db26.reindex_background_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.443-0400 m30999| 2015-07-09T13:57:43.442-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.443-0400 m31100| 2015-07-09T13:57:43.442-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.444-0400 m31101| 2015-07-09T13:57:43.444-0400 I COMMAND [repl writer worker 15] CMD: drop db26.reindex_background_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.444-0400 m31102| 2015-07-09T13:57:43.444-0400 I COMMAND [repl writer worker 4] CMD: drop db26.reindex_background_4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.445-0400 m30999| 2015-07-09T13:57:43.445-0400 I COMMAND [conn1] DROP: db26.reindex_background_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.446-0400 m30999| 2015-07-09T13:57:43.445-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.446-0400 m31100| 2015-07-09T13:57:43.445-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.447-0400 m31102| 2015-07-09T13:57:43.446-0400 I COMMAND [repl 
writer worker 1] CMD: drop db26.reindex_background_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.448-0400 m30999| 2015-07-09T13:57:43.448-0400 I COMMAND [conn1] DROP: db26.reindex_background_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.448-0400 m30999| 2015-07-09T13:57:43.448-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.448-0400 m31101| 2015-07-09T13:57:43.448-0400 I COMMAND [repl writer worker 14] CMD: drop db26.reindex_background_5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.449-0400 m31100| 2015-07-09T13:57:43.448-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.450-0400 m31102| 2015-07-09T13:57:43.449-0400 I COMMAND [repl writer worker 8] CMD: drop db26.reindex_background_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.451-0400 m31101| 2015-07-09T13:57:43.451-0400 I COMMAND [repl writer worker 3] CMD: drop db26.reindex_background_6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.452-0400 m30999| 2015-07-09T13:57:43.451-0400 I COMMAND [conn1] DROP: db26.reindex_background_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.452-0400 m30999| 2015-07-09T13:57:43.451-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.452-0400 m31100| 2015-07-09T13:57:43.452-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.453-0400 m31102| 2015-07-09T13:57:43.452-0400 I COMMAND [repl writer worker 15] CMD: drop db26.reindex_background_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.454-0400 m31101| 2015-07-09T13:57:43.453-0400 I COMMAND [repl writer worker 6] CMD: drop db26.reindex_background_7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.455-0400 m30999| 2015-07-09T13:57:43.454-0400 I COMMAND [conn1] DROP: db26.reindex_background_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.455-0400 m30999| 2015-07-09T13:57:43.454-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.455-0400 m31100| 2015-07-09T13:57:43.455-0400 I COMMAND [conn60] CMD: drop db26.reindex_background_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.456-0400 m31102| 2015-07-09T13:57:43.455-0400 I COMMAND [repl writer worker 5] CMD: drop db26.reindex_background_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.456-0400 m31101| 2015-07-09T13:57:43.456-0400 I COMMAND [repl writer worker 8] CMD: drop db26.reindex_background_8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 jstests/concurrency/fsm_workloads/reindex_background.js: Workload completed in 19172 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.457-0400 m30999| 2015-07-09T13:57:43.457-0400 I COMMAND [conn1] DROP: db26.coll26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.458-0400 m30999| 2015-07-09T13:57:43.457-0400 I SHARDING [conn1] about to log metadata event: { _id: 
"bs-osx108-8-2015-07-09T13:57:43.457-0400-559eb617ca4787b9985d1c95", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464663457), what: "dropCollection.start", ns: "db26.coll26", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.459-0400 m31101| 2015-07-09T13:57:43.459-0400 I COMMAND [repl writer worker 4] CMD: drop db26.reindex_background_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.460-0400 m31102| 2015-07-09T13:57:43.459-0400 I COMMAND [repl writer worker 13] CMD: drop db26.reindex_background_9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.514-0400 m30999| 2015-07-09T13:57:43.513-0400 I SHARDING [conn1] distributed lock 'db26.coll26/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb617ca4787b9985d1c96 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.515-0400 m31100| 2015-07-09T13:57:43.514-0400 I COMMAND [conn40] CMD: drop db26.coll26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.518-0400 m31200| 2015-07-09T13:57:43.517-0400 I COMMAND [conn18] CMD: drop db26.coll26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.519-0400 m31102| 2015-07-09T13:57:43.519-0400 I COMMAND [repl writer worker 10] CMD: drop db26.coll26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.519-0400 m31101| 2015-07-09T13:57:43.519-0400 I COMMAND [repl writer worker 11] CMD: drop db26.coll26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.585-0400 m31100| 2015-07-09T13:57:43.585-0400 I SHARDING [conn40] remotely refreshing metadata for db26.coll26 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb603ca4787b9985d1c93, current metadata version is 2|3||559eb603ca4787b9985d1c93 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.587-0400 m31100| 2015-07-09T13:57:43.586-0400 W SHARDING [conn40] no chunks found when reloading db26.coll26, previous version was 0|0||559eb603ca4787b9985d1c93, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.587-0400 m31100| 2015-07-09T13:57:43.587-0400 I SHARDING [conn40] dropping metadata for db26.coll26 at shard version 2|3||559eb603ca4787b9985d1c93, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.588-0400 m31200| 2015-07-09T13:57:43.588-0400 I SHARDING [conn18] remotely refreshing metadata for db26.coll26 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb603ca4787b9985d1c93, current metadata version is 2|5||559eb603ca4787b9985d1c93 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.590-0400 m31200| 2015-07-09T13:57:43.589-0400 W SHARDING [conn18] no chunks found when reloading db26.coll26, previous version was 0|0||559eb603ca4787b9985d1c93, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.590-0400 m31200| 2015-07-09T13:57:43.590-0400 I SHARDING [conn18] dropping metadata for db26.coll26 at shard version 2|5||559eb603ca4787b9985d1c93, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.591-0400 m30999| 2015-07-09T13:57:43.590-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:43.590-0400-559eb617ca4787b9985d1c97", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464663590), what: "dropCollection", ns: "db26.coll26", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.645-0400 m30999| 2015-07-09T13:57:43.644-0400 I SHARDING [conn1] distributed lock 'db26.coll26/bs-osx108-8:30999:1436464534:16807' 
unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.701-0400 m30999| 2015-07-09T13:57:43.700-0400 I COMMAND [conn1] DROP DATABASE: db26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.701-0400 m30999| 2015-07-09T13:57:43.700-0400 I SHARDING [conn1] DBConfig::dropDatabase: db26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.701-0400 m30999| 2015-07-09T13:57:43.700-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:43.700-0400-559eb617ca4787b9985d1c98", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464663700), what: "dropDatabase.start", ns: "db26", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.808-0400 m30999| 2015-07-09T13:57:43.807-0400 I SHARDING [conn1] DBConfig::dropDatabase: db26 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.808-0400 m31100| 2015-07-09T13:57:43.808-0400 I COMMAND [conn28] dropDatabase db26 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.808-0400 m31100| 2015-07-09T13:57:43.808-0400 I COMMAND [conn28] dropDatabase db26 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.809-0400 m30999| 2015-07-09T13:57:43.808-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:43.808-0400-559eb617ca4787b9985d1c99", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464663808), what: "dropDatabase", ns: "db26", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.809-0400 m31101| 2015-07-09T13:57:43.809-0400 I COMMAND [repl writer worker 1] dropDatabase db26 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.809-0400 m31101| 2015-07-09T13:57:43.809-0400 I COMMAND [repl writer worker 1] dropDatabase db26 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.810-0400 m31102| 2015-07-09T13:57:43.809-0400 I COMMAND [repl writer worker 14] dropDatabase db26 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.810-0400 m31102| 2015-07-09T13:57:43.809-0400 I COMMAND [repl writer worker 14] dropDatabase db26 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.898-0400 m31100| 2015-07-09T13:57:43.897-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.902-0400 m31102| 2015-07-09T13:57:43.902-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.902-0400 m31101| 2015-07-09T13:57:43.902-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.913-0400 m31201| 2015-07-09T13:57:43.913-0400 I COMMAND [repl writer worker 1] CMD: drop db26.coll26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.916-0400 m31202| 2015-07-09T13:57:43.914-0400 I COMMAND [repl writer worker 15] CMD: drop db26.coll26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.932-0400 m31200| 2015-07-09T13:57:43.932-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.934-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.934-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.934-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.935-0400 jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.935-0400 ---- 
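The preceding entries trace the between-workload teardown of db26 through the mongos (m30999): the sharded db26.coll26 takes the dropCollection path under a distributed lock with config changelog events, the unsharded collections are logged as passthrough drops, and the secondaries replay each drop via their repl writer workers. A sketch of the equivalent shell calls, assuming a session on the mongos; names are taken from the log:

    var workloadDB = db.getSiblingDB("db26");
    workloadDB.coll26.drop();    // "dropCollection.start" / "dropCollection" events
    workloadDB.dropDatabase();   // "dropDatabase.start" / "dropDatabase", then
                                 // replayed on m31101/m31102 by repl writer workers
    db.getSiblingDB("test").fsm_teardown.drop();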
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.935-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.935-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.940-0400 m31202| 2015-07-09T13:57:43.940-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.941-0400 m31201| 2015-07-09T13:57:43.941-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.942-0400 m30999| 2015-07-09T13:57:43.942-0400 I SHARDING [conn1] distributed lock 'db27/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb617ca4787b9985d1c9a
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.949-0400 m30999| 2015-07-09T13:57:43.949-0400 I SHARDING [conn1] Placing [db27] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:43.949-0400 m30999| 2015-07-09T13:57:43.949-0400 I SHARDING [conn1] Enabling sharding for database [db27] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.002-0400 m30999| 2015-07-09T13:57:44.002-0400 I SHARDING [conn1] distributed lock 'db27/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.026-0400 m31100| 2015-07-09T13:57:44.025-0400 I INDEX [conn69] build index on: db27.coll27 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.026-0400 m31100| 2015-07-09T13:57:44.025-0400 I INDEX [conn69] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.038-0400 m31100| 2015-07-09T13:57:44.037-0400 I INDEX [conn69] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.039-0400 m30999| 2015-07-09T13:57:44.039-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db27.coll27", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.042-0400 m30999| 2015-07-09T13:57:44.042-0400 I SHARDING [conn1] distributed lock 'db27.coll27/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb618ca4787b9985d1c9b
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.043-0400 m30999| 2015-07-09T13:57:44.043-0400 I SHARDING [conn1] enable sharding on: db27.coll27 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.044-0400 m30999| 2015-07-09T13:57:44.043-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.043-0400-559eb618ca4787b9985d1c9c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464664043), what: "shardCollection.start", ns: "db27.coll27", details: { shardKey: { _id: "hashed" }, collection: "db27.coll27", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.045-0400 m31101| 2015-07-09T13:57:44.044-0400 I INDEX [repl writer worker 7] build index on: db27.coll27 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.045-0400 m31101| 2015-07-09T13:57:44.045-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.051-0400 m31102| 2015-07-09T13:57:44.051-0400 I INDEX [repl writer worker 0] build index on: db27.coll27 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.051-0400 m31102| 2015-07-09T13:57:44.051-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.054-0400 m31101| 2015-07-09T13:57:44.053-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.058-0400 m31102| 2015-07-09T13:57:44.058-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.097-0400 m30999| 2015-07-09T13:57:44.096-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db27.coll27 using new epoch 559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.204-0400 m30999| 2015-07-09T13:57:44.203-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db27.coll27: 0ms sequenceNumber: 122 version: 1|1||559eb618ca4787b9985d1c9d based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.259-0400 m30999| 2015-07-09T13:57:44.259-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db27.coll27: 0ms sequenceNumber: 123 version: 1|1||559eb618ca4787b9985d1c9d based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.261-0400 m31100| 2015-07-09T13:57:44.261-0400 I SHARDING [conn60] remotely refreshing metadata for db27.coll27 with requested shard version 1|1||559eb618ca4787b9985d1c9d, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.263-0400 m31100| 2015-07-09T13:57:44.262-0400 I SHARDING [conn60] collection db27.coll27 was previously unsharded, new metadata loaded with shard version 1|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.263-0400 m31100| 2015-07-09T13:57:44.263-0400 I SHARDING [conn60] collection version was loaded at version 1|1||559eb618ca4787b9985d1c9d, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.263-0400 m30999| 2015-07-09T13:57:44.263-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.263-0400-559eb618ca4787b9985d1c9e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464664263), what: "shardCollection", ns: "db27.coll27", details: { version: "1|1||559eb618ca4787b9985d1c9d" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.318-0400 m30999| 2015-07-09T13:57:44.318-0400 I SHARDING [conn1] distributed lock 'db27.coll27/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.319-0400 m30999| 2015-07-09T13:57:44.319-0400 I SHARDING [conn1] moving chunk ns: db27.coll27 moving ( ns: db27.coll27, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.320-0400 m31100| 2015-07-09T13:57:44.319-0400 I SHARDING [conn40] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.321-0400 m31100| 2015-07-09T13:57:44.320-0400 I SHARDING [conn40] received moveChunk request: { moveChunk: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb618ca4787b9985d1c9d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.324-0400 m31100| 2015-07-09T13:57:44.324-0400 I SHARDING [conn40] distributed lock 'db27.coll27/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb618792e00bb67274945
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.325-0400 m31100| 2015-07-09T13:57:44.324-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.324-0400-559eb618792e00bb67274946", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464664324), what: "moveChunk.start", ns: "db27.coll27", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.378-0400 m31100| 2015-07-09T13:57:44.377-0400 I SHARDING [conn40] remotely refreshing metadata for db27.coll27 based on current shard version 1|1||559eb618ca4787b9985d1c9d, current metadata version is 1|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.379-0400 m31100| 2015-07-09T13:57:44.379-0400 I SHARDING [conn40] metadata of collection db27.coll27 already up to date (shard version : 1|1||559eb618ca4787b9985d1c9d, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.379-0400 m31100| 2015-07-09T13:57:44.379-0400 I SHARDING [conn40] moveChunk request accepted at version 1|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.380-0400 m31100| 2015-07-09T13:57:44.379-0400 I SHARDING [conn40] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.380-0400 m31200| 2015-07-09T13:57:44.380-0400 I SHARDING [conn16] remotely refreshing metadata for db27.coll27, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.382-0400 m31200| 2015-07-09T13:57:44.381-0400 I SHARDING [conn16] collection db27.coll27 was previously unsharded, new metadata loaded with shard version 0|0||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.382-0400 m31200| 2015-07-09T13:57:44.381-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb618ca4787b9985d1c9d, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.382-0400 m31200| 2015-07-09T13:57:44.382-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db27.coll27 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.384-0400 m31100| 2015-07-09T13:57:44.384-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.388-0400 m31100| 2015-07-09T13:57:44.387-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.393-0400 m31100| 2015-07-09T13:57:44.392-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.400-0400 m31200| 2015-07-09T13:57:44.397-0400 I INDEX [migrateThread] build index on: db27.coll27 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.400-0400 m31200| 2015-07-09T13:57:44.398-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.402-0400 m31100| 2015-07-09T13:57:44.402-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.407-0400 m31200| 2015-07-09T13:57:44.406-0400 I INDEX [migrateThread] build index on: db27.coll27 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.407-0400 m31200| 2015-07-09T13:57:44.406-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.418-0400 m31200| 2015-07-09T13:57:44.418-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.420-0400 m31200| 2015-07-09T13:57:44.419-0400 I SHARDING [migrateThread] Deleter starting delete for: db27.coll27 from { _id: 0 } -> { _id: MaxKey }, with opId: 42110
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.421-0400 m31100| 2015-07-09T13:57:44.421-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.424-0400 m31200| 2015-07-09T13:57:44.424-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db27.coll27 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.441-0400 m31202| 2015-07-09T13:57:44.440-0400 I INDEX [repl writer worker 8] build index on: db27.coll27 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.441-0400 m31202| 2015-07-09T13:57:44.440-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.443-0400 m31201| 2015-07-09T13:57:44.442-0400 I INDEX [repl writer worker 9] build index on: db27.coll27 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.443-0400 m31201| 2015-07-09T13:57:44.442-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.448-0400 m31202| 2015-07-09T13:57:44.448-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.450-0400 m31201| 2015-07-09T13:57:44.450-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.451-0400 m31200| 2015-07-09T13:57:44.450-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.451-0400 m31200| 2015-07-09T13:57:44.450-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db27.coll27' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.454-0400 m31100| 2015-07-09T13:57:44.454-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.455-0400 m31100| 2015-07-09T13:57:44.454-0400 I SHARDING [conn40] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.455-0400 m31100| 2015-07-09T13:57:44.454-0400 I SHARDING [conn40] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.455-0400 m31100| 2015-07-09T13:57:44.455-0400 I SHARDING [conn40] moveChunk setting version to: 2|0||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.463-0400 m31200| 2015-07-09T13:57:44.463-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db27.coll27' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.463-0400 m31200| 2015-07-09T13:57:44.463-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.463-0400-559eb618d5a107a5b9c0db1f", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464664463), what: "moveChunk.to", ns: "db27.coll27", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 36, step 2 of 5: 30, step 3 of 5: 1, step 4 of 5: 0, step 5 of 5: 12, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.517-0400 m31100| 2015-07-09T13:57:44.516-0400 I SHARDING [conn40] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.517-0400 m31100| 2015-07-09T13:57:44.516-0400 I SHARDING [conn40] moveChunk updating self version to: 2|1||559eb618ca4787b9985d1c9d through { _id: MinKey } -> { _id: 0 } for collection 'db27.coll27'
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.518-0400 m31100| 2015-07-09T13:57:44.517-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.517-0400-559eb618792e00bb67274947", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464664517), what: "moveChunk.commit", ns: "db27.coll27", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.571-0400 m31100| 2015-07-09T13:57:44.570-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section
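
(The entries above trace both halves of a chunk migration: the donor, m31100, reports data transfer progress through the "ready" and "steady" states while the recipient, m31200, builds indexes and flushes the chunk to its secondaries; only then does the donor enter the critical section and bump the shard version to 2|0. The same migration can be requested by hand through a mongos; a minimal sketch matching the options in the request logged above:

    // run against a mongos, e.g. the one on port 30999
    db.adminCommand({
        moveChunk: "db27.coll27",
        find: { _id: 0 },        // any shard key value inside the chunk to move
        to: "test-rs1",
        _waitForDelete: true     // mirrors waitForDelete: true in the logged request
    });

With _waitForDelete the command blocks until the donor's range deleter has cleaned up the moved range, which is why the next entries show the donor "doing delete inline for cleanup of chunk data" before the command is reported complete at 306ms.)
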
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.571-0400 m31100| 2015-07-09T13:57:44.571-0400 I SHARDING [conn40] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.571-0400 m31100| 2015-07-09T13:57:44.571-0400 I SHARDING [conn40] Deleter starting delete for: db27.coll27 from { _id: 0 } -> { _id: MaxKey }, with opId: 32496
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.572-0400 m31100| 2015-07-09T13:57:44.571-0400 I SHARDING [conn40] rangeDeleter deleted 0 documents for db27.coll27 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.572-0400 m31100| 2015-07-09T13:57:44.571-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.573-0400 m31100| 2015-07-09T13:57:44.573-0400 I SHARDING [conn40] distributed lock 'db27.coll27/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.574-0400 m31100| 2015-07-09T13:57:44.573-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.573-0400-559eb618792e00bb67274948", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464664573), what: "moveChunk.from", ns: "db27.coll27", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 72, step 5 of 6: 116, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.627-0400 m31100| 2015-07-09T13:57:44.626-0400 I COMMAND [conn40] command db27.coll27 command: moveChunk { moveChunk: "db27.coll27", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb618ca4787b9985d1c9d') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 306ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.628-0400 m30999| 2015-07-09T13:57:44.628-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db27.coll27: 0ms sequenceNumber: 124 version: 2|1||559eb618ca4787b9985d1c9d based on: 1|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.629-0400 m31100| 2015-07-09T13:57:44.629-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db27.coll27", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb618ca4787b9985d1c9d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.633-0400 m31100| 2015-07-09T13:57:44.632-0400 I SHARDING [conn40] distributed lock 'db27.coll27/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb618792e00bb67274949
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.633-0400 m31100| 2015-07-09T13:57:44.633-0400 I SHARDING [conn40] remotely refreshing metadata for db27.coll27 based on current shard version 2|0||559eb618ca4787b9985d1c9d, current metadata version is 2|0||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.634-0400 m31100| 2015-07-09T13:57:44.634-0400 I SHARDING [conn40] updating metadata for db27.coll27 from shard version 2|0||559eb618ca4787b9985d1c9d to shard version 2|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.634-0400 m31100| 2015-07-09T13:57:44.634-0400 I SHARDING [conn40] collection version was loaded at version 2|1||559eb618ca4787b9985d1c9d, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.634-0400 m31100| 2015-07-09T13:57:44.634-0400 I SHARDING [conn40] splitChunk accepted at version 2|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.635-0400 m31100| 2015-07-09T13:57:44.635-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.635-0400-559eb618792e00bb6727494a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464664635), what: "split", ns: "db27.coll27", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb618ca4787b9985d1c9d') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb618ca4787b9985d1c9d') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.689-0400 m31100| 2015-07-09T13:57:44.689-0400 I SHARDING [conn40] distributed lock 'db27.coll27/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.691-0400 m30999| 2015-07-09T13:57:44.691-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db27.coll27: 0ms sequenceNumber: 125 version: 2|3||559eb618ca4787b9985d1c9d based on: 2|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.692-0400 m31200| 2015-07-09T13:57:44.691-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db27.coll27", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb618ca4787b9985d1c9d') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.696-0400 m31200| 2015-07-09T13:57:44.696-0400 I SHARDING [conn18] distributed lock 'db27.coll27/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb618d5a107a5b9c0db20
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.696-0400 m31200| 2015-07-09T13:57:44.696-0400 I SHARDING [conn18] remotely refreshing metadata for db27.coll27 based on current shard version 0|0||559eb618ca4787b9985d1c9d, current metadata version is 1|1||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.698-0400 m31200| 2015-07-09T13:57:44.697-0400 I SHARDING [conn18] updating metadata for db27.coll27 from shard version 0|0||559eb618ca4787b9985d1c9d to shard version 2|0||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.698-0400 m31200| 2015-07-09T13:57:44.697-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb618ca4787b9985d1c9d, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.698-0400 m31200| 2015-07-09T13:57:44.697-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.699-0400 m31200| 2015-07-09T13:57:44.698-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:44.698-0400-559eb618d5a107a5b9c0db21", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464664698), what: "split", ns: "db27.coll27", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb618ca4787b9985d1c9d') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb618ca4787b9985d1c9d') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.753-0400 m31200| 2015-07-09T13:57:44.753-0400 I SHARDING [conn18] distributed lock 'db27.coll27/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.755-0400 m30999| 2015-07-09T13:57:44.755-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db27.coll27: 1ms sequenceNumber: 126 version: 2|5||559eb618ca4787b9985d1c9d based on: 2|3||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.762-0400 m31100| 2015-07-09T13:57:44.761-0400 I INDEX [conn60] build index on: db27.coll27 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.762-0400 m31100| 2015-07-09T13:57:44.761-0400 I INDEX [conn60] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.763-0400 m31200| 2015-07-09T13:57:44.762-0400 I INDEX [conn32] build index on: db27.coll27 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.763-0400 m31200| 2015-07-09T13:57:44.762-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.767-0400 m31100| 2015-07-09T13:57:44.767-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.770-0400 m31200| 2015-07-09T13:57:44.770-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.773-0400 m31100| 2015-07-09T13:57:44.772-0400 I COMMAND [conn40] CMD: dropIndexes db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.773-0400 m31200| 2015-07-09T13:57:44.772-0400 I COMMAND [conn18] CMD: dropIndexes db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.774-0400 m31101| 2015-07-09T13:57:44.774-0400 I INDEX [repl writer worker 0] build index on: db27.coll27 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.774-0400 m31101| 2015-07-09T13:57:44.774-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.774-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.787-0400 m31102| 2015-07-09T13:57:44.782-0400 I INDEX [repl writer worker 3] build index on: db27.coll27 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.787-0400 m31102| 2015-07-09T13:57:44.782-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.881-0400 m31202| 2015-07-09T13:57:44.880-0400 I INDEX [repl writer worker 14] build index on: db27.coll27 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.881-0400 m31202| 2015-07-09T13:57:44.880-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.898-0400 m31201| 2015-07-09T13:57:44.894-0400 I INDEX [repl writer worker 13] build index on: db27.coll27 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db27.coll27" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.898-0400 m31201| 2015-07-09T13:57:44.894-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.898-0400 m31101| 2015-07-09T13:57:44.898-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.912-0400 m31102| 2015-07-09T13:57:44.909-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.913-0400 m31101| 2015-07-09T13:57:44.913-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.919-0400 m31102| 2015-07-09T13:57:44.918-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.937-0400 m31201| 2015-07-09T13:57:44.937-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.940-0400 m31202| 2015-07-09T13:57:44.940-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.948-0400 m31201| 2015-07-09T13:57:44.945-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.948-0400 m31202| 2015-07-09T13:57:44.947-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.972-0400 m30999| 2015-07-09T13:57:44.969-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63210 #167 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.973-0400 m30998| 2015-07-09T13:57:44.969-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63211 #167 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.973-0400 m30999| 2015-07-09T13:57:44.973-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63212 #168 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.974-0400 m30999| 2015-07-09T13:57:44.973-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63213 #169 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.979-0400 m30999| 2015-07-09T13:57:44.979-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63214 #170 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.985-0400 m30999| 2015-07-09T13:57:44.982-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63215 #171 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.985-0400 m30999| 2015-07-09T13:57:44.985-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63216 #172 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:44.990-0400 m30998| 2015-07-09T13:57:44.990-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63217 #168 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.002-0400 m30998| 2015-07-09T13:57:45.001-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63218 #169 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.016-0400 m30998| 2015-07-09T13:57:45.016-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63219 #170 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.016-0400 m30999| 2015-07-09T13:57:45.016-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63221 #173 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.016-0400 m30998| 2015-07-09T13:57:45.016-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63220 #171 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.019-0400 m30998| 2015-07-09T13:57:45.018-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63222 #172 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.023-0400 m30999| 2015-07-09T13:57:45.023-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63223 #174 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.038-0400 m30999| 2015-07-09T13:57:45.038-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63224 #175 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.039-0400 m30998| 2015-07-09T13:57:45.039-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63225 #173 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.040-0400 m30999| 2015-07-09T13:57:45.039-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63227 #176 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.044-0400 m30998| 2015-07-09T13:57:45.044-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63226 #174 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.044-0400 m30998| 2015-07-09T13:57:45.044-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63228 #175 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.051-0400 m30998| 2015-07-09T13:57:45.050-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63229 #176 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.061-0400 setting random seed: 3977339486591
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.061-0400 setting random seed: 2355641624890
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.061-0400 setting random seed: 8017653282731
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.061-0400 setting random seed: 2698785620741
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.064-0400 setting random seed: 4280893788672
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.065-0400 setting random seed: 4855498895049
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.065-0400 setting random seed: 171674396842
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.069-0400 setting random seed: 175115903839
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.072-0400 setting random seed: 1268807966262
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.075-0400 setting random seed: 6546836956404
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.078-0400 m30998| 2015-07-09T13:57:45.077-0400 I SHARDING [conn169] ChunkManager: time to load chunks for db27.coll27: 0ms sequenceNumber: 31 version: 2|5||559eb618ca4787b9985d1c9d based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.111-0400 setting random seed: 2688777521252
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.111-0400 setting random seed: 8563948404043
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.111-0400 setting random seed: 571580105461
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.115-0400 setting random seed: 5331589076668
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.116-0400 setting random seed: 8489948920905
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.148-0400 setting random seed: 6369967088103
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.148-0400 setting random seed: 3172997198998
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.148-0400 setting random seed: 7144782268442
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.161-0400 setting random seed: 36899605765
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.188-0400 setting random seed: 283820736221
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.622-0400 m30999| 2015-07-09T13:57:45.622-0400 I NETWORK [conn168] end connection 127.0.0.1:63212 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.637-0400 m30998| 2015-07-09T13:57:45.636-0400 I NETWORK [conn167] end connection 127.0.0.1:63211 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.700-0400 m30999| 2015-07-09T13:57:45.700-0400 I NETWORK [conn167] end connection 127.0.0.1:63210 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.713-0400 m30999| 2015-07-09T13:57:45.712-0400 I NETWORK [conn170] end connection 127.0.0.1:63214 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.744-0400 m30999| 2015-07-09T13:57:45.744-0400 I NETWORK [conn172] end connection 127.0.0.1:63216 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.773-0400 m30999| 2015-07-09T13:57:45.773-0400 I NETWORK [conn169] end connection 127.0.0.1:63213 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.835-0400 m30999| 2015-07-09T13:57:45.835-0400 I NETWORK [conn171] end connection 127.0.0.1:63215 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.842-0400 m30998| 2015-07-09T13:57:45.842-0400 I NETWORK [conn169] end connection 127.0.0.1:63218 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.903-0400 m30998| 2015-07-09T13:57:45.903-0400 I NETWORK [conn168] end connection 127.0.0.1:63217 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.928-0400 m30998| 2015-07-09T13:57:45.927-0400 I NETWORK [conn174] end connection 127.0.0.1:63226 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.939-0400 m30998| 2015-07-09T13:57:45.939-0400 I NETWORK [conn172] end connection 127.0.0.1:63222 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.961-0400 m30999| 2015-07-09T13:57:45.961-0400 I NETWORK [conn176] end connection 127.0.0.1:63227 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.966-0400 m30999| 2015-07-09T13:57:45.966-0400 I NETWORK [conn173] end connection 127.0.0.1:63221 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.969-0400 m30998| 2015-07-09T13:57:45.969-0400 I NETWORK [conn170] end connection 127.0.0.1:63219 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.983-0400 m30999| 2015-07-09T13:57:45.982-0400 I NETWORK [conn174] end connection 127.0.0.1:63223 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:45.995-0400 m30998| 2015-07-09T13:57:45.994-0400 I NETWORK [conn171] end connection 127.0.0.1:63220 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.000-0400 m30998| 2015-07-09T13:57:46.000-0400 I NETWORK [conn175] end connection 127.0.0.1:63228 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.024-0400 m30998| 2015-07-09T13:57:46.024-0400 I NETWORK [conn176] end connection 127.0.0.1:63229 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.028-0400 m30998| 2015-07-09T13:57:46.028-0400 I NETWORK [conn173] end connection 127.0.0.1:63225 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.032-0400 m30999| 2015-07-09T13:57:46.032-0400 I NETWORK [conn175] end connection 127.0.0.1:63224 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.061-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.061-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.061-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.062-0400 jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js: Workload completed in 1287 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.062-0400 ----
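
(This workload's collection was sharded on { _id: "hashed" }, which is why its setup showed two initial chunks, a migration of the upper half to test-rs1, and a split of each half near +/-2^62 (the splitKeys value 4611686018427387902 in the entries above), leaving two chunks per shard. The equivalent shell setup is straightforward; a sketch, assuming a connection to one of the mongos routers:

    sh.enableSharding("db27");
    sh.shardCollection("db27.coll27", { _id: "hashed" });
    // hashing spreads even monotonically increasing _id values
    // uniformly across the key space, so both shards take inserts

Because the hashed key space is uniform, a single moveChunk was enough to even out the initial distribution before the threads started.)
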
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.062-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.062-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.062-0400 m30999| 2015-07-09T13:57:46.061-0400 I COMMAND [conn1] DROP: db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.063-0400 m30999| 2015-07-09T13:57:46.061-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:46.061-0400-559eb61aca4787b9985d1c9f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464666061), what: "dropCollection.start", ns: "db27.coll27", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.119-0400 m30999| 2015-07-09T13:57:46.118-0400 I SHARDING [conn1] distributed lock 'db27.coll27/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb61aca4787b9985d1ca0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.120-0400 m31100| 2015-07-09T13:57:46.120-0400 I COMMAND [conn40] CMD: drop db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.123-0400 m31200| 2015-07-09T13:57:46.123-0400 I COMMAND [conn18] CMD: drop db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.125-0400 m31101| 2015-07-09T13:57:46.125-0400 I COMMAND [repl writer worker 6] CMD: drop db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.127-0400 m31102| 2015-07-09T13:57:46.127-0400 I COMMAND [repl writer worker 9] CMD: drop db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.147-0400 m31202| 2015-07-09T13:57:46.147-0400 I COMMAND [repl writer worker 6] CMD: drop db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.149-0400 m31201| 2015-07-09T13:57:46.149-0400 I COMMAND [repl writer worker 10] CMD: drop db27.coll27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.199-0400 m31100| 2015-07-09T13:57:46.199-0400 I SHARDING [conn40] remotely refreshing metadata for db27.coll27 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb618ca4787b9985d1c9d, current metadata version is 2|3||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.201-0400 m31100| 2015-07-09T13:57:46.200-0400 W SHARDING [conn40] no chunks found when reloading db27.coll27, previous version was 0|0||559eb618ca4787b9985d1c9d, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.201-0400 m31100| 2015-07-09T13:57:46.200-0400 I SHARDING [conn40] dropping metadata for db27.coll27 at shard version 2|3||559eb618ca4787b9985d1c9d, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.202-0400 m31200| 2015-07-09T13:57:46.201-0400 I SHARDING [conn18] remotely refreshing metadata for db27.coll27 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb618ca4787b9985d1c9d, current metadata version is 2|5||559eb618ca4787b9985d1c9d
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.203-0400 m31200| 2015-07-09T13:57:46.203-0400 W SHARDING [conn18] no chunks found when reloading db27.coll27, previous version was 0|0||559eb618ca4787b9985d1c9d, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.203-0400 m31200| 2015-07-09T13:57:46.203-0400 I SHARDING [conn18] dropping metadata for db27.coll27 at shard version 2|5||559eb618ca4787b9985d1c9d, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.204-0400 m30999| 2015-07-09T13:57:46.204-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:46.204-0400-559eb61aca4787b9985d1ca1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464666204), what: "dropCollection", ns: "db27.coll27", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.257-0400 m30999| 2015-07-09T13:57:46.257-0400 I SHARDING [conn1] distributed lock 'db27.coll27/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.313-0400 m30999| 2015-07-09T13:57:46.313-0400 I COMMAND [conn1] DROP DATABASE: db27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.313-0400 m30999| 2015-07-09T13:57:46.313-0400 I SHARDING [conn1] DBConfig::dropDatabase: db27
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.314-0400 m30999| 2015-07-09T13:57:46.313-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:46.313-0400-559eb61aca4787b9985d1ca2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464666313), what: "dropDatabase.start", ns: "db27", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.420-0400 m30999| 2015-07-09T13:57:46.419-0400 I SHARDING [conn1] DBConfig::dropDatabase: db27 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.420-0400 m31100| 2015-07-09T13:57:46.420-0400 I COMMAND [conn28] dropDatabase db27 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.420-0400 m31100| 2015-07-09T13:57:46.420-0400 I COMMAND [conn28] dropDatabase db27 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.421-0400 m30999| 2015-07-09T13:57:46.421-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:46.421-0400-559eb61aca4787b9985d1ca3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464666421), what: "dropDatabase", ns: "db27", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.422-0400 m31101| 2015-07-09T13:57:46.421-0400 I COMMAND [repl writer worker 0] dropDatabase db27 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.422-0400 m31101| 2015-07-09T13:57:46.421-0400 I COMMAND [repl writer worker 0] dropDatabase db27 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.422-0400 m31102| 2015-07-09T13:57:46.422-0400 I COMMAND [repl writer worker 15] dropDatabase db27 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.423-0400 m31102| 2015-07-09T13:57:46.422-0400 I COMMAND [repl writer worker 15] dropDatabase db27 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.507-0400 m31100| 2015-07-09T13:57:46.506-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.510-0400 m31102| 2015-07-09T13:57:46.510-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.510-0400 m31101| 2015-07-09T13:57:46.510-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.545-0400 m31200| 2015-07-09T13:57:46.545-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.547-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.547-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.547-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.547-0400 jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.548-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.548-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.548-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.551-0400 m31202| 2015-07-09T13:57:46.550-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.553-0400 m31201| 2015-07-09T13:57:46.552-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.554-0400 m30999| 2015-07-09T13:57:46.553-0400 I SHARDING [conn1] distributed lock 'db28/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb61aca4787b9985d1ca4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.558-0400 m30999| 2015-07-09T13:57:46.558-0400 I SHARDING [conn1] Placing [db28] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.559-0400 m30999| 2015-07-09T13:57:46.558-0400 I SHARDING [conn1] Enabling sharding for database [db28] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.612-0400 m30999| 2015-07-09T13:57:46.611-0400 I SHARDING [conn1] distributed lock 'db28/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.636-0400 m31100| 2015-07-09T13:57:46.635-0400 I INDEX [conn16] build index on: db28.coll28 properties: { v: 1, key: { indexed_insert_ordered_bulk: 1.0 }, name: "indexed_insert_ordered_bulk_1", ns: "db28.coll28" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.636-0400 m31100| 2015-07-09T13:57:46.635-0400 I INDEX [conn16] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.642-0400 m31100| 2015-07-09T13:57:46.642-0400 I INDEX [conn16] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.644-0400 m30999| 2015-07-09T13:57:46.643-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db28.coll28", key: { indexed_insert_ordered_bulk: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.646-0400 m30999| 2015-07-09T13:57:46.646-0400 I SHARDING [conn1] distributed lock 'db28.coll28/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb61aca4787b9985d1ca5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.647-0400 m30999| 2015-07-09T13:57:46.647-0400 I SHARDING [conn1] enable sharding on: db28.coll28 with shard key: { indexed_insert_ordered_bulk: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.647-0400 m30999| 2015-07-09T13:57:46.647-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:46.647-0400-559eb61aca4787b9985d1ca6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464666647), what: "shardCollection.start", ns: "db28.coll28", details: { shardKey: { indexed_insert_ordered_bulk: 1.0 }, collection: "db28.coll28", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.653-0400 m31101| 2015-07-09T13:57:46.652-0400 I INDEX [repl writer worker 4] build index on: db28.coll28 properties: { v: 1, key: { indexed_insert_ordered_bulk: 1.0 }, name: "indexed_insert_ordered_bulk_1", ns: "db28.coll28" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.653-0400 m31101| 2015-07-09T13:57:46.652-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.658-0400 m31102| 2015-07-09T13:57:46.658-0400 I INDEX [repl writer worker 11] build index on: db28.coll28 properties: { v: 1, key: { indexed_insert_ordered_bulk: 1.0 }, name: "indexed_insert_ordered_bulk_1", ns: "db28.coll28" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.658-0400 m31102| 2015-07-09T13:57:46.658-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.664-0400 m31101| 2015-07-09T13:57:46.664-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.666-0400 m31102| 2015-07-09T13:57:46.665-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.704-0400 m30999| 2015-07-09T13:57:46.703-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db28.coll28 using new epoch 559eb61aca4787b9985d1ca7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.762-0400 m30999| 2015-07-09T13:57:46.762-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db28.coll28: 0ms sequenceNumber: 127 version: 1|0||559eb61aca4787b9985d1ca7 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.816-0400 m30999| 2015-07-09T13:57:46.816-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db28.coll28: 0ms sequenceNumber: 128 version: 1|0||559eb61aca4787b9985d1ca7 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.818-0400 m31100| 2015-07-09T13:57:46.817-0400 I SHARDING [conn47] remotely refreshing metadata for db28.coll28 with requested shard version 1|0||559eb61aca4787b9985d1ca7, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.819-0400 m31100| 2015-07-09T13:57:46.819-0400 I SHARDING [conn47] collection db28.coll28 was previously unsharded, new metadata loaded with shard version 1|0||559eb61aca4787b9985d1ca7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.819-0400 m31100| 2015-07-09T13:57:46.819-0400 I SHARDING [conn47] collection version was loaded at version 1|0||559eb61aca4787b9985d1ca7, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.820-0400 m30999| 2015-07-09T13:57:46.819-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:46.819-0400-559eb61aca4787b9985d1ca8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464666819), what: "shardCollection", ns: "db28.coll28", details: { version: "1|0||559eb61aca4787b9985d1ca7" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.874-0400 m30999| 2015-07-09T13:57:46.874-0400 I SHARDING [conn1] distributed lock 'db28.coll28/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.898-0400 m31200| 2015-07-09T13:57:46.897-0400 I INDEX [conn32] build index on: db28.coll28 properties: { v: 1, key: { indexed_insert_ordered_bulk: 1.0 }, name: "indexed_insert_ordered_bulk_1", ns: "db28.coll28" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.898-0400 m31200| 2015-07-09T13:57:46.898-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.902-0400 m31200| 2015-07-09T13:57:46.901-0400 I INDEX [conn32] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:46.906-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.055-0400 m30998| 2015-07-09T13:57:47.055-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63230 #177 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.081-0400 m30999| 2015-07-09T13:57:47.080-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63231 #177 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.081-0400 m31202| 2015-07-09T13:57:47.080-0400 I INDEX [repl writer worker 8] build index on: db28.coll28 properties: { v: 1, key: { indexed_insert_ordered_bulk: 1.0 }, name: "indexed_insert_ordered_bulk_1", ns: "db28.coll28" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.081-0400 m31202| 2015-07-09T13:57:47.081-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.090-0400 m31201| 2015-07-09T13:57:47.090-0400 I INDEX [repl writer worker 8] build index on: db28.coll28 properties: { v: 1, key: { indexed_insert_ordered_bulk: 1.0 }, name: "indexed_insert_ordered_bulk_1", ns: "db28.coll28" }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.091-0400 m31201| 2015-07-09T13:57:47.090-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.123-0400 m31202| 2015-07-09T13:57:47.123-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.133-0400 m31201| 2015-07-09T13:57:47.133-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.145-0400 m30999| 2015-07-09T13:57:47.145-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63232 #178 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.146-0400 m30998| 2015-07-09T13:57:47.146-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63233 #178 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.157-0400 m30998| 2015-07-09T13:57:47.157-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63235 #179 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.337-0400 m30999| 2015-07-09T13:57:47.158-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63236 #179 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.338-0400 m30999| 2015-07-09T13:57:47.165-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63239 #180 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.338-0400 m30998| 2015-07-09T13:57:47.167-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63237 #180 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.338-0400 m30998| 2015-07-09T13:57:47.169-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63238 #181 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.339-0400 m30999| 2015-07-09T13:57:47.169-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63240 #181 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.340-0400 m30999| 2015-07-09T13:57:47.169-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63242 #182 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.340-0400 m30999| 2015-07-09T13:57:47.176-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63244 #183 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.340-0400 m30999| 2015-07-09T13:57:47.177-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63245 #184 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.341-0400 m30999| 2015-07-09T13:57:47.181-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63246 #185 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.341-0400 m30998| 2015-07-09T13:57:47.180-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63241 #182 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.341-0400 m30998| 2015-07-09T13:57:47.181-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63243 #183 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.341-0400 m30998| 2015-07-09T13:57:47.188-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63247 #184 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.341-0400 m30998| 2015-07-09T13:57:47.189-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63249 #185 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 m30998| 2015-07-09T13:57:47.189-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63250 #186 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 m30999| 2015-07-09T13:57:47.191-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63248 #186 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 setting random seed: 1343338964506
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 setting random seed: 7497986978851
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 setting random seed: 7409675777889
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 setting random seed: 1347803412936
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 setting random seed: 4849610854871
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 setting random seed: 1930129616521
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 setting random seed: 2531971773132
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.342-0400 m30998| 2015-07-09T13:57:47.210-0400 I SHARDING [conn178] ChunkManager: time to load chunks for db28.coll28: 0ms sequenceNumber: 32 version: 1|0||559eb61aca4787b9985d1ca7 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 4420856982469
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 4606163850985
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 3522128090262
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 8667212231084
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 8981939726509
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 9836035668849
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 4283013395033
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 4251563339494
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 6406840602867
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 5795879559591
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 9461053730919
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.343-0400 setting random seed: 9688479974865
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 setting random seed: 6025080149993
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 m31100| 2015-07-09T13:57:47.242-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63251 #144 (88 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 m31100| 2015-07-09T13:57:47.252-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63252 #145 (89 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 m31100| 2015-07-09T13:57:47.252-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63253 #146 (90 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 m31100| 2015-07-09T13:57:47.258-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63254 #147 (91 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 m31100| 2015-07-09T13:57:47.267-0400 I SHARDING [conn40] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 m31100| 2015-07-09T13:57:47.267-0400 I SHARDING [conn34] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.344-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.345-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.345-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.345-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.345-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.345-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.345-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.346-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
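
The "possible low cardinality key" warnings above come from the auto-splitter: each FSM worker thread inserts its own thread id (0-19) as the value of the shard key indexed_insert_ordered_bulk, so the whole collection has only ~20 distinct key values and clean split points are scarce. A minimal shell sketch of a workload that would reproduce this situation; the names match the log, but the counts and structure are illustrative, not the test's actual code:

    // Hypothetical repro: shard on a key that takes only ~20 distinct values,
    // then insert via an ordered bulk op (as the indexed_insert_ordered_bulk
    // workload does). The auto-splitter then warns about low cardinality.
    sh.enableSharding("db28");
    sh.shardCollection("db28.coll28", { indexed_insert_ordered_bulk: 1 });
    var coll = db.getSiblingDB("db28").coll28;
    for (var tid = 0; tid < 20; tid++) {          // one "thread id" per worker
        var bulk = coll.initializeOrderedBulkOp();
        for (var i = 0; i < 100; i++) {
            bulk.insert({ indexed_insert_ordered_bulk: tid });
        }
        bulk.execute();
    }
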
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.346-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.346-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.346-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn40] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.346-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.346-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.346-0400 m31100| 2015-07-09T13:57:47.268-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.347-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.347-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.347-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.347-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.347-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.347-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.347-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.348-0400 m31100| 2015-07-09T13:57:47.269-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.348-0400 m31100| 2015-07-09T13:57:47.270-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.352-0400 m31100| 2015-07-09T13:57:47.270-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.353-0400 m31100| 2015-07-09T13:57:47.274-0400 I SHARDING [conn38] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.355-0400 m31100| 2015-07-09T13:57:47.274-0400 I SHARDING [conn37] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.355-0400 m31100| 2015-07-09T13:57:47.275-0400 I SHARDING [conn35] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.355-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.356-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.356-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.357-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.357-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.357-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.358-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.358-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.358-0400 m31100| 2015-07-09T13:57:47.275-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.358-0400 m31100| 2015-07-09T13:57:47.276-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.358-0400 m31100| 2015-07-09T13:57:47.276-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.359-0400 m31100| 2015-07-09T13:57:47.276-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.359-0400 m31100| 2015-07-09T13:57:47.276-0400 I SHARDING [conn15] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.359-0400 m31100| 2015-07-09T13:57:47.277-0400 I SHARDING [conn36] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.359-0400 m31100| 2015-07-09T13:57:47.277-0400 I SHARDING [conn132] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.359-0400 m31100| 2015-07-09T13:57:47.277-0400 I SHARDING [conn34] could not acquire lock 'db28.coll28/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.359-0400 m31100| 2015-07-09T13:57:47.277-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.359-0400 m31100| 2015-07-09T13:57:47.277-0400 I SHARDING [conn34] distributed lock 'db28.coll28/bs-osx108-8:31100:1436464536:197041335' was not acquired.
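
The "received splitChunk request" entries above are the shard primary handling the internal splitChunk command that each mongos sends once a chunk looks big enough to split. A sketch of issuing one by hand against the shard primary, with field values copied from the log entries above and the splitKeys list abbreviated; normally only mongos sends this, so this is for illustration only:

    // Internal command as seen in the log; mongos normally issues this itself.
    // Values copied from the entries above; splitKeys list abbreviated.
    db.getSiblingDB("admin").runCommand({
        splitChunk: "db28.coll28",
        keyPattern: { indexed_insert_ordered_bulk: 1.0 },
        min: { indexed_insert_ordered_bulk: MinKey },
        max: { indexed_insert_ordered_bulk: MaxKey },
        from: "test-rs0",
        splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 10.0 } ],
        configdb: "test-configRS/bs-osx108-8:29000",
        epoch: ObjectId('559eb61aca4787b9985d1ca7')
    });
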
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.360-0400 m31100| 2015-07-09T13:57:47.277-0400 I SHARDING [conn40] distributed lock 'db28.coll28/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb61b792e00bb6727494c
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.360-0400 m31100| 2015-07-09T13:57:47.278-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.360-0400 m31100| 2015-07-09T13:57:47.278-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.360-0400 m31100| 2015-07-09T13:57:47.278-0400 W SHARDING [conn34] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.361-0400 m31100| 2015-07-09T13:57:47.278-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.361-0400 m31100| 2015-07-09T13:57:47.278-0400 I SHARDING [conn40] remotely refreshing metadata for db28.coll28 based on current shard version 1|0||559eb61aca4787b9985d1ca7, current metadata version is 1|0||559eb61aca4787b9985d1ca7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.362-0400 m31100| 2015-07-09T13:57:47.278-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.362-0400 m31100| 2015-07-09T13:57:47.278-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.364-0400 m30999| 2015-07-09T13:57:47.280-0400 W SHARDING [conn184] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.365-0400 m31100| 2015-07-09T13:57:47.279-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.365-0400 m31100| 2015-07-09T13:57:47.279-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.366-0400 m31100| 2015-07-09T13:57:47.281-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.366-0400 m31100| 2015-07-09T13:57:47.281-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.366-0400 m31100| 2015-07-09T13:57:47.281-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.366-0400 m31100| 2015-07-09T13:57:47.281-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.366-0400 m31100| 2015-07-09T13:57:47.281-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.366-0400 m31100| 2015-07-09T13:57:47.281-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.368-0400 m31100| 2015-07-09T13:57:47.281-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.368-0400 m29000| 2015-07-09T13:57:47.281-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63255 #49 (49 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.369-0400 m31100| 2015-07-09T13:57:47.282-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.369-0400 m31100| 2015-07-09T13:57:47.282-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.369-0400 m31100| 2015-07-09T13:57:47.282-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.370-0400 m31100| 2015-07-09T13:57:47.283-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.370-0400 m29000| 2015-07-09T13:57:47.284-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63256 #50 (50 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.370-0400 m31100| 2015-07-09T13:57:47.284-0400 W SHARDING [conn38] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.370-0400 m31100| 2015-07-09T13:57:47.284-0400 W SHARDING [conn35] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.371-0400 m30999| 2015-07-09T13:57:47.285-0400 W SHARDING [conn186] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.372-0400 m30998| 2015-07-09T13:57:47.285-0400 W SHARDING [conn186] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.372-0400 m31100| 2015-07-09T13:57:47.286-0400 W SHARDING [conn132] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
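
Only one of the racing splitters can hold the distributed collection lock at a time; everyone else fails with code 125 and the split is simply retried on a later write, so these warnings are benign. A sketch of how the same race would surface if a split were driven by hand through mongos; the middle value and the assumption that the lock failure surfaces as code 125 here are illustrative:

    // Hypothetical: drive a split through mongos and tolerate losing the
    // distributed-lock race, which the auto-splitter treats as benign.
    var res = db.adminCommand({ split: "db28.coll28", middle: { indexed_insert_ordered_bulk: 10 } });
    if (res.ok === 0 && res.code === 125) {
        print("lost the split lock race; another node is already splitting this chunk");
    }
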
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.372-0400 m31100| 2015-07-09T13:57:47.286-0400 W SHARDING [conn15] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.374-0400 m30999| 2015-07-09T13:57:47.286-0400 W SHARDING [conn179] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.375-0400 m30998| 2015-07-09T13:57:47.286-0400 W SHARDING [conn177] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.375-0400 m31100| 2015-07-09T13:57:47.288-0400 W SHARDING [conn37] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.376-0400 m30999| 2015-07-09T13:57:47.289-0400 W SHARDING [conn177] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.376-0400 m31100| 2015-07-09T13:57:47.288-0400 W SHARDING [conn36] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.377-0400 m30998| 2015-07-09T13:57:47.289-0400 W SHARDING [conn180] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.378-0400 m31100| 2015-07-09T13:57:47.295-0400 I SHARDING [conn15] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.378-0400 m31100| 2015-07-09T13:57:47.295-0400 I SHARDING [conn40] metadata of collection db28.coll28 already up to date (shard version : 1|0||559eb61aca4787b9985d1ca7, took 2ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.378-0400 m31100| 2015-07-09T13:57:47.295-0400 I SHARDING [conn36] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.379-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.379-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.379-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.380-0400 m31100| 2015-07-09T13:57:47.295-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.381-0400 m31100| 2015-07-09T13:57:47.296-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.381-0400 m31100| 2015-07-09T13:57:47.296-0400 I SHARDING [conn40] splitChunk accepted at version 1|0||559eb61aca4787b9985d1ca7
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.381-0400 m31100| 2015-07-09T13:57:47.296-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.383-0400 m31100| 2015-07-09T13:57:47.296-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 11.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.383-0400 m31100| 2015-07-09T13:57:47.296-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.384-0400 m31100| 2015-07-09T13:57:47.296-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.384-0400 m31100| 2015-07-09T13:57:47.296-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.384-0400 m31100| 2015-07-09T13:57:47.296-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.385-0400 m31100| 2015-07-09T13:57:47.297-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.385-0400 m31100| 2015-07-09T13:57:47.297-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 14.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.385-0400 m31100| 2015-07-09T13:57:47.298-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.385-0400 m31100| 2015-07-09T13:57:47.299-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.385-0400 m31100| 2015-07-09T13:57:47.299-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.386-0400 m31100| 2015-07-09T13:57:47.299-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 16.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.386-0400 m31100| 2015-07-09T13:57:47.299-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.386-0400 m31100| 2015-07-09T13:57:47.299-0400 W SHARDING [conn15] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 18.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.386-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.386-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 11.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.386-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.386-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.387-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 14.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.387-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.387-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 16.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.388-0400 m31100| 2015-07-09T13:57:47.300-0400 W SHARDING [conn36] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 18.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.389-0400 m31100| 2015-07-09T13:57:47.306-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.390-0400 m31100| 2015-07-09T13:57:47.307-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.395-0400 m31100| 2015-07-09T13:57:47.313-0400 I SHARDING [conn37] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.395-0400 m31100| 2015-07-09T13:57:47.313-0400 I SHARDING [conn38] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.395-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.395-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.395-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.395-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.396-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.396-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.396-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.396-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.396-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.400-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.400-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.400-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.400-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 11.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.400-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.402-0400 m31100| 2015-07-09T13:57:47.314-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.402-0400 m31100| 2015-07-09T13:57:47.315-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.402-0400 m31100| 2015-07-09T13:57:47.315-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.403-0400 m31100| 2015-07-09T13:57:47.315-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.403-0400 m31100| 2015-07-09T13:57:47.315-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 14.0 }
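
The recurring "request split points lookup" entries are the shard answering the internal splitVector command, which scans the shard-key index for candidate split points and emits the cardinality warnings seen above when too few distinct values exist. A diagnostic sketch of invoking it directly against the shard primary; the 64 MB figure is an assumption matching the usual default max chunk size, not a value from this log:

    // Internal split-point scan behind "request split points lookup";
    // run against the shard primary. maxChunkSizeBytes is an assumption.
    db.getSiblingDB("admin").runCommand({
        splitVector: "db28.coll28",
        keyPattern: { indexed_insert_ordered_bulk: 1.0 },
        min: { indexed_insert_ordered_bulk: MinKey },
        max: { indexed_insert_ordered_bulk: MaxKey },
        maxChunkSizeBytes: 64 * 1024 * 1024
    });
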
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.403-0400 m31100| 2015-07-09T13:57:47.315-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.403-0400 m31100| 2015-07-09T13:57:47.315-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.403-0400 m31100| 2015-07-09T13:57:47.317-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.403-0400 m31100| 2015-07-09T13:57:47.317-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 16.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.404-0400 m31100| 2015-07-09T13:57:47.317-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.404-0400 m31100| 2015-07-09T13:57:47.317-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 17.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.404-0400 m31100| 2015-07-09T13:57:47.317-0400 I SHARDING [conn132] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.405-0400 m31100| 2015-07-09T13:57:47.317-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.405-0400 m31100| 2015-07-09T13:57:47.317-0400 I SHARDING [conn34] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.407-0400 m31100| 2015-07-09T13:57:47.317-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 18.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.407-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.407-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn38] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 19.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.407-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.408-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.408-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 11.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.408-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
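
Versions such as 1|0||559eb61aca4787b9985d1ca7 in these entries read as major|minor||epoch; once a splitChunk is accepted, the config server's chunk documents carry the bumped versions in their lastmod fields. A read-only sketch for inspecting that metadata through mongos; this is a generic diagnostic, not part of the test itself:

    // Inspect chunk boundaries and versions (lastmod) for the collection;
    // read-only, run through a mongos.
    db.getSiblingDB("config").chunks.find(
        { ns: "db28.coll28" },
        { min: 1, max: 1, shard: 1, lastmod: 1 }
    ).sort({ min: 1 }).forEach(printjson);
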
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.408-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.408-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 14.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.408-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.409-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 16.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.409-0400 m31100| 2015-07-09T13:57:47.318-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 17.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.409-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 18.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.409-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 19.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.409-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.409-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.410-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.410-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.410-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.413-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.413-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.413-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.414-0400 m31100| 2015-07-09T13:57:47.319-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.414-0400 m31100| 2015-07-09T13:57:47.319-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.420-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.420-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.420-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 11.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.420-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.421-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.421-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 14.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.421-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.421-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 16.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.421-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 17.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.422-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 18.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.423-0400 m31100| 2015-07-09T13:57:47.320-0400 W SHARDING [conn132] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 19.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.427-0400 m31100| 2015-07-09T13:57:47.320-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.428-0400 m31100| 2015-07-09T13:57:47.321-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.429-0400 m31100| 2015-07-09T13:57:47.321-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.429-0400 m29000| 2015-07-09T13:57:47.323-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63257 #51 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.429-0400 m31100| 2015-07-09T13:57:47.324-0400 I SHARDING [conn35] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.429-0400 m31100| 2015-07-09T13:57:47.325-0400 I SHARDING [conn39] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.429-0400 m31100| 2015-07-09T13:57:47.326-0400 W SHARDING [conn35] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.430-0400 m31100| 2015-07-09T13:57:47.326-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.430-0400 m31100| 2015-07-09T13:57:47.328-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.431-0400 m29000| 2015-07-09T13:57:47.329-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63258 #52 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.431-0400 m29000| 2015-07-09T13:57:47.330-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63259 #53 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.431-0400 m31100| 2015-07-09T13:57:47.332-0400 I SHARDING [conn32] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.434-0400 m31100| 2015-07-09T13:57:47.335-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: {
indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.435-0400 m29000| 2015-07-09T13:57:47.336-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63260 #54 (54 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.435-0400 m30998| 2015-07-09T13:57:47.388-0400 I SHARDING [conn180] ChunkManager: time to load chunks for db28.coll28: 0ms sequenceNumber: 33 version: 1|19||559eb61aca4787b9985d1ca7 based on: 1|0||559eb61aca4787b9985d1ca7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.436-0400 m30998| 2015-07-09T13:57:47.389-0400 W SHARDING [conn182] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.437-0400 m30999| 2015-07-09T13:57:47.390-0400 W SHARDING [conn181] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.439-0400 m30998| 
2015-07-09T13:57:47.389-0400 W SHARDING [conn184] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.439-0400 m31100| 2015-07-09T13:57:47.389-0400 W SHARDING [conn39] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.439-0400 m31100| 2015-07-09T13:57:47.389-0400 W SHARDING [conn35] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.440-0400 m31100| 2015-07-09T13:57:47.390-0400 W SHARDING [conn15] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.441-0400 m31100| 2015-07-09T13:57:47.390-0400 W SHARDING [conn38] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken. 
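
The code-125 splitChunk failures above are the losing side of an autosplit race: several mongos routers (m30998, m30999) ask the shard primary (m31100) to split the same [MinKey, MaxKey) chunk at once, the first request takes the distributed collection lock, and the rest bail out with "could not acquire collection lock". A minimal mongo-shell sketch of riding out the same contention with a manual split; the helper name, retry count, and back-off are illustrative, not part of this test:

    // Hypothetical retry helper: issue a manual split through a mongos and
    // back off while a competing splitChunk holds the collection lock.
    function splitWithRetry(ns, middle, attempts) {
        for (var i = 0; i < attempts; i++) {
            var res = db.adminCommand({ split: ns, middle: middle });
            if (res.ok) {
                return res; // the split committed on this attempt
            }
            print("split attempt " + i + " failed: " + res.errmsg);
            sleep(100); // let the competing splitter finish, then try again
        }
        throw Error("could not split " + ns + " after " + attempts + " attempts");
    }
    splitWithRetry("db28.coll28", { indexed_insert_ordered_bulk: 10.0 }, 20);
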
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.442-0400 m30999| 2015-07-09T13:57:47.390-0400 W SHARDING [conn180] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.443-0400 m30999| 2015-07-09T13:57:47.391-0400 W SHARDING [conn183] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.443-0400 m29000| 2015-07-09T13:57:47.391-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63261 #55 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.444-0400 m31100| 2015-07-09T13:57:47.390-0400 W SHARDING [conn37] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.444-0400 m29000| 2015-07-09T13:57:47.395-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63262 #56 (56 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.444-0400 m31100| 2015-07-09T13:57:47.396-0400 W SHARDING [conn34] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.444-0400 m29000| 2015-07-09T13:57:47.396-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63263 #57 (57 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.445-0400 m30999| 2015-07-09T13:57:47.396-0400 W SHARDING [conn178] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.445-0400 m29000| 2015-07-09T13:57:47.397-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63264 #58 (58 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.445-0400 m31100| 2015-07-09T13:57:47.397-0400 W SHARDING [conn36] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
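
The lock these requests are queueing on is the distributed collection lock kept on the config servers; while a split is in flight it is visible in config.locks under the same name that later shows up in the "distributed lock ... unlocked" line. A quick shell sketch (run through a mongos) for inspecting the current holder, assuming the 3.x config.locks schema:

    // Show who currently holds the distributed lock that the failed
    // splitChunk requests above are contending for.
    db.getSiblingDB("config").locks.find({ _id: "db28.coll28" }).forEach(printjson);
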
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.447-0400 m30998| 2015-07-09T13:57:47.397-0400 W SHARDING [conn179] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.448-0400 m30998| 2015-07-09T13:57:47.399-0400 W SHARDING [conn185] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.448-0400 m31100| 2015-07-09T13:57:47.398-0400 W SHARDING [conn32] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken.
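
Each "request split points lookup" line (more of them appear just below) is the shard primary serving an internal splitVector command: it walks the shard-key index and proposes the splitKeys arrays echoed back in the splitChunk requests. A roughly equivalent shell invocation against the shard primary; maxChunkSizeBytes here is an illustrative value, not this test's split threshold:

    // Ask the shard primary for candidate split points on the shard-key index.
    db.runCommand({
        splitVector: "db28.coll28",
        keyPattern: { indexed_insert_ordered_bulk: 1.0 },
        min: { indexed_insert_ordered_bulk: MinKey },
        max: { indexed_insert_ordered_bulk: MaxKey },
        maxChunkSizeBytes: 1024 * 1024
    });
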
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.448-0400 m30999| 2015-07-09T13:57:47.399-0400 I SHARDING [conn178] ChunkManager: time to load chunks for db28.coll28: 0ms sequenceNumber: 129 version: 1|19||559eb61aca4787b9985d1ca7 based on: 1|0||559eb61aca4787b9985d1ca7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.449-0400 m31100| 2015-07-09T13:57:47.402-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.402-0400-559eb61b792e00bb6727494e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667402), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 1, of: 19, chunk: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.449-0400 m31100| 2015-07-09T13:57:47.403-0400 I SHARDING [conn37] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.449-0400 m31100| 2015-07-09T13:57:47.403-0400 I SHARDING [conn34] request split points lookup for chunk db28.coll28 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.449-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn132] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.449-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.449-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.450-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.450-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.450-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.450-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.454-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.454-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.458-0400 m30998| 
2015-07-09T13:57:47.405-0400 W SHARDING [conn181] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.458-0400 m31100| 2015-07-09T13:57:47.404-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.459-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.459-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.460-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.460-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.460-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.460-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.461-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.461-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.461-0400 m31100| 2015-07-09T13:57:47.405-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 7.0 } 
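
The "possible low cardinality key detected" warnings enumerate essentially every distinct shard-key value because the key only ever takes the values 0 through 19 (one per workload thread, judging from the splitKeys arrays above), so chunks can never split any finer than one value per chunk. A shell sketch for spotting such a key up front; the db handle is illustrative:

    // Count distinct shard-key values; a small count means every candidate
    // split point is also a chunk bound, which is what triggers the warning.
    var keys = db.getSiblingDB("db28").coll28.distinct("indexed_insert_ordered_bulk");
    print("distinct shard key values: " + keys.length);
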
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.461-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.461-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.461-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.461-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.462-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.462-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.462-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.462-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.462-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.462-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.462-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.463-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.463-0400 m31100| 2015-07-09T13:57:47.406-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.465-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.465-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.465-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { 
indexed_insert_ordered_bulk: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.465-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn34] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.466-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.466-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.466-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.466-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.466-0400 m31100| 2015-07-09T13:57:47.407-0400 W SHARDING [conn37] possible low cardinality key detected in db28.coll28 - key is { indexed_insert_ordered_bulk: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.467-0400 m31100| 2015-07-09T13:57:47.417-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.468-0400 m31100| 2015-07-09T13:57:47.418-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { 
indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.468-0400 m31100| 2015-07-09T13:57:47.420-0400 W SHARDING [conn37] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.469-0400 m30999| 2015-07-09T13:57:47.421-0400 W SHARDING [conn177] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.469-0400 m31100| 2015-07-09T13:57:47.422-0400 W SHARDING [conn34] could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db28.coll28 is taken. 
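
The ~100 ms "insert ... documents: 15, ordered: true" commands that start appearing below are the workload threads flushing ordered bulk inserts while the split is still committing (hence the Metadata lock acquisitions in the lock stats). A minimal sketch of that kind of batch, assuming the single-field document shape implied by the shard key; the value 7.0 stands in for one thread's id:

    // One ordered bulk batch of 15 documents, as the workload threads issue.
    var coll = db.getSiblingDB("db28").coll28;
    var bulk = coll.initializeOrderedBulkOp();
    for (var i = 0; i < 15; i++) {
        bulk.insert({ indexed_insert_ordered_bulk: 7.0 }); // hypothetical thread id
    }
    bulk.execute();
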
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.470-0400 m30999| 2015-07-09T13:57:47.423-0400 W SHARDING [conn179] splitChunk failed - cmd: { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 17.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db28.coll28 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.470-0400 m31100| 2015-07-09T13:57:47.454-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.454-0400-559eb61b792e00bb6727494f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667454), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 2, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 0.0 }, max: { indexed_insert_ordered_bulk: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.507-0400 m31100| 2015-07-09T13:57:47.507-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.507-0400-559eb61b792e00bb67274950", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667507), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 3, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 2.0 }, max: { indexed_insert_ordered_bulk: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.562-0400 m31100| 2015-07-09T13:57:47.560-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.560-0400-559eb61b792e00bb67274951", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667560), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 4, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 3.0 }, max: { indexed_insert_ordered_bulk: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.613-0400 m31100| 2015-07-09T13:57:47.612-0400 I SHARDING [conn40] about to log metadata event: { _id: 
"bs-osx108-8-2015-07-09T13:57:47.612-0400-559eb61b792e00bb67274952", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667612), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 5, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 4.0 }, max: { indexed_insert_ordered_bulk: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.664-0400 m31100| 2015-07-09T13:57:47.664-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.664-0400-559eb61b792e00bb67274953", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667664), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 6, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 5.0 }, max: { indexed_insert_ordered_bulk: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.717-0400 m31100| 2015-07-09T13:57:47.716-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.716-0400-559eb61b792e00bb67274954", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667716), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 7, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 6.0 }, max: { indexed_insert_ordered_bulk: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.760-0400 m31100| 2015-07-09T13:57:47.759-0400 I COMMAND [conn66] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 19, w: 19 } }, Database: { acquireCount: { w: 19 } }, Collection: { acquireCount: { w: 4 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 111ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.772-0400 m31100| 2015-07-09T13:57:47.772-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.772-0400-559eb61b792e00bb67274955", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667772), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 8, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 7.0 }, max: { indexed_insert_ordered_bulk: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.788-0400 m31100| 2015-07-09T13:57:47.773-0400 I COMMAND [conn22] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 
locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.818-0400 m31100| 2015-07-09T13:57:47.805-0400 I COMMAND [conn23] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.818-0400 m31100| 2015-07-09T13:57:47.807-0400 I COMMAND [conn147] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.819-0400 m31100| 2015-07-09T13:57:47.812-0400 I COMMAND [conn29] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.819-0400 m31100| 2015-07-09T13:57:47.815-0400 I COMMAND [conn133] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.831-0400 m31100| 2015-07-09T13:57:47.830-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.830-0400-559eb61b792e00bb67274956", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667830), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 9, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 8.0 }, max: { indexed_insert_ordered_bulk: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.886-0400 m31100| 
2015-07-09T13:57:47.884-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.884-0400-559eb61b792e00bb67274957", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667884), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 10, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 9.0 }, max: { indexed_insert_ordered_bulk: 10.0 }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.917-0400 m31100| 2015-07-09T13:57:47.915-0400 I COMMAND [conn23] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 105ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.935-0400 m31100| 2015-07-09T13:57:47.935-0400 I COMMAND [conn26] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 19, w: 19 } }, Database: { acquireCount: { w: 19 } }, Collection: { acquireCount: { w: 4 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.937-0400 m31100| 2015-07-09T13:57:47.937-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.937-0400-559eb61b792e00bb67274958", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667937), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 11, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 10.0 }, max: { indexed_insert_ordered_bulk: 11.0 }, lastmod: Timestamp 1000|11, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:47.992-0400 m31100| 2015-07-09T13:57:47.991-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:47.991-0400-559eb61b792e00bb67274959", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464667991), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 12, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 11.0 }, max: { indexed_insert_ordered_bulk: 12.0 }, lastmod: Timestamp 1000|12, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.045-0400 m31100| 2015-07-09T13:57:48.044-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:48.044-0400-559eb61c792e00bb6727495a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464668044), what: "multi-split", 
ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 13, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 12.0 }, max: { indexed_insert_ordered_bulk: 13.0 }, lastmod: Timestamp 1000|13, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.099-0400 m31100| 2015-07-09T13:57:48.097-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:48.097-0400-559eb61c792e00bb6727495b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464668097), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 14, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 13.0 }, max: { indexed_insert_ordered_bulk: 14.0 }, lastmod: Timestamp 1000|14, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.151-0400 m31100| 2015-07-09T13:57:48.149-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:48.149-0400-559eb61c792e00bb6727495c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464668149), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 15, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 14.0 }, max: { indexed_insert_ordered_bulk: 15.0 }, lastmod: Timestamp 1000|15, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.203-0400 m31100| 2015-07-09T13:57:48.202-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:48.202-0400-559eb61c792e00bb6727495d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464668202), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 16, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 15.0 }, max: { indexed_insert_ordered_bulk: 16.0 }, lastmod: Timestamp 1000|16, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.256-0400 m31100| 2015-07-09T13:57:48.255-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:48.255-0400-559eb61c792e00bb6727495e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464668255), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 17, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 16.0 }, max: { indexed_insert_ordered_bulk: 18.0 }, lastmod: Timestamp 1000|17, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.311-0400 m31100| 2015-07-09T13:57:48.309-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:48.309-0400-559eb61c792e00bb6727495f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464668309), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 18, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 
18.0 }, max: { indexed_insert_ordered_bulk: 19.0 }, lastmod: Timestamp 1000|18, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.366-0400 m31100| 2015-07-09T13:57:48.366-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:48.366-0400-559eb61c792e00bb67274960", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464668366), what: "multi-split", ns: "db28.coll28", details: { before: { min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } }, number: 19, of: 19, chunk: { min: { indexed_insert_ordered_bulk: 19.0 }, max: { indexed_insert_ordered_bulk: MaxKey }, lastmod: Timestamp 1000|19, lastmodEpoch: ObjectId('559eb61aca4787b9985d1ca7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.400-0400 m31100| 2015-07-09T13:57:48.399-0400 I COMMAND [conn144] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.400-0400 m31100| 2015-07-09T13:57:48.400-0400 I COMMAND [conn147] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.414-0400 m31100| 2015-07-09T13:57:48.414-0400 I COMMAND [conn133] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.419-0400 m31100| 2015-07-09T13:57:48.418-0400 I SHARDING [conn40] distributed lock 'db28.coll28/bs-osx108-8:31100:1436464536:197041335' unlocked. 
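
With all 19 "multi-split" metadata events logged and the distributed lock released, the new chunk boundaries are durable in the config metadata. A shell sketch (through a mongos) that lists the resulting chunks in shard-key order, assuming the 3.x config.chunks schema:

    // List the chunks produced by the 19-way multi-split of db28.coll28.
    db.getSiblingDB("config").chunks.find({ ns: "db28.coll28" }).sort({ min: 1 })
        .forEach(function(c) {
            print(tojson(c.min) + " -->> " + tojson(c.max) + " on " + c.shard);
        });
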
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.420-0400 m31100| 2015-07-09T13:57:48.419-0400 I COMMAND [conn40] command db28.coll28 command: splitChunk { splitChunk: "db28.coll28", keyPattern: { indexed_insert_ordered_bulk: 1.0 }, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_ordered_bulk: 0.0 }, { indexed_insert_ordered_bulk: 2.0 }, { indexed_insert_ordered_bulk: 3.0 }, { indexed_insert_ordered_bulk: 4.0 }, { indexed_insert_ordered_bulk: 5.0 }, { indexed_insert_ordered_bulk: 6.0 }, { indexed_insert_ordered_bulk: 7.0 }, { indexed_insert_ordered_bulk: 8.0 }, { indexed_insert_ordered_bulk: 9.0 }, { indexed_insert_ordered_bulk: 10.0 }, { indexed_insert_ordered_bulk: 11.0 }, { indexed_insert_ordered_bulk: 12.0 }, { indexed_insert_ordered_bulk: 13.0 }, { indexed_insert_ordered_bulk: 14.0 }, { indexed_insert_ordered_bulk: 15.0 }, { indexed_insert_ordered_bulk: 16.0 }, { indexed_insert_ordered_bulk: 18.0 }, { indexed_insert_ordered_bulk: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61aca4787b9985d1ca7') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 26294 } } } protocol:op_command 1148ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.420-0400 m30999| 2015-07-09T13:57:48.420-0400 I SHARDING [conn185] autosplitted db28.coll28 shard: ns: db28.coll28, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { indexed_insert_ordered_bulk: MinKey }, max: { indexed_insert_ordered_bulk: MaxKey } into 19 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.421-0400 m31100| 2015-07-09T13:57:48.420-0400 I COMMAND [conn68] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.449-0400 m31100| 2015-07-09T13:57:48.448-0400 I COMMAND [conn67] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.581-0400 m31100| 2015-07-09T13:57:48.580-0400 I COMMAND [conn16] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, 
Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.596-0400 m31100| 2015-07-09T13:57:48.595-0400 I COMMAND [conn66] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.684-0400 m31100| 2015-07-09T13:57:48.683-0400 I COMMAND [conn144] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.689-0400 m31100| 2015-07-09T13:57:48.688-0400 I COMMAND [conn30] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.703-0400 m31100| 2015-07-09T13:57:48.702-0400 I COMMAND [conn16] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.732-0400 m31100| 2015-07-09T13:57:48.731-0400 I COMMAND [conn147] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 20, w: 20 } }, Database: { acquireCount: { w: 20 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.755-0400 m31100| 2015-07-09T13:57:48.755-0400 I COMMAND [conn31] command db28.$cmd command: insert { insert: 
"coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.767-0400 m31100| 2015-07-09T13:57:48.767-0400 I COMMAND [conn67] command db28.$cmd command: insert { insert: "coll28", documents: 15, ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|19, ObjectId('559eb61aca4787b9985d1ca7') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 21, w: 21 } }, Database: { acquireCount: { w: 21 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.905-0400 m31100| 2015-07-09T13:57:48.904-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63265 #148 (92 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.913-0400 m31100| 2015-07-09T13:57:48.913-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63266 #149 (93 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.915-0400 m31100| 2015-07-09T13:57:48.915-0400 I QUERY [conn47] query db28.coll28 query: { query: { indexed_insert_ordered_bulk: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:5340 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:41 nreturned:75 reslen:4445 locks:{ Global: { acquireCount: { r: 84 } }, Database: { acquireCount: { r: 42 } }, Collection: { acquireCount: { r: 42 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:48.925-0400 m31100| 2015-07-09T13:57:48.924-0400 I QUERY [conn139] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 7.0 } cursorid:2298889957257 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:30 nreturned:184 reslen:10876 locks:{ Global: { acquireCount: { r: 62 } }, Database: { acquireCount: { r: 31 } }, Collection: { acquireCount: { r: 31 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.047-0400 m31100| 2015-07-09T13:57:49.047-0400 I QUERY [conn47] query db28.coll28 query: { query: { indexed_insert_ordered_bulk: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:5640 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:44 nreturned:90 reslen:5330 locks:{ Global: { acquireCount: { r: 90 } }, Database: { acquireCount: { r: 45 } }, Collection: { acquireCount: { r: 45 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.055-0400 m31100| 2015-07-09T13:57:49.053-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63267 #150 (94 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.176-0400 m31100| 2015-07-09T13:57:49.176-0400 I QUERY [conn56] query db28.coll28 query: { query: { indexed_insert_ordered_bulk: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN cursorid:2298252242125 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:5664 keyUpdates:0 writeConflicts:0 
numYields:44 nreturned:101 reslen:5979 locks:{ Global: { acquireCount: { r: 90 } }, Database: { acquireCount: { r: 45 } }, Collection: { acquireCount: { r: 45 } } } 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.256-0400 m31100| 2015-07-09T13:57:49.255-0400 I QUERY [conn136] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 13.0 } cursorid:2298895152278 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:35 nreturned:244 reslen:14416 locks:{ Global: { acquireCount: { r: 72 } }, Database: { acquireCount: { r: 36 } }, Collection: { acquireCount: { r: 36 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.273-0400 m31100| 2015-07-09T13:57:49.271-0400 I QUERY [conn139] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 19.0 } cursorid:2298014435546 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:30 nreturned:199 reslen:11761 locks:{ Global: { acquireCount: { r: 62 } }, Database: { acquireCount: { r: 31 } }, Collection: { acquireCount: { r: 31 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.279-0400 m31100| 2015-07-09T13:57:49.275-0400 I QUERY [conn142] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 6.0 } cursorid:2299936288082 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:33 nreturned:199 reslen:11761 locks:{ Global: { acquireCount: { r: 68 } }, Database: { acquireCount: { r: 34 } }, Collection: { acquireCount: { r: 34 } } } 106ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.290-0400 m31100| 2015-07-09T13:57:49.289-0400 I QUERY [conn138] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 9.0 } cursorid:2299709725426 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:31 nreturned:184 reslen:10876 locks:{ Global: { acquireCount: { r: 64 } }, Database: { acquireCount: { r: 32 } }, Collection: { acquireCount: { r: 32 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.315-0400 m31100| 2015-07-09T13:57:49.315-0400 I QUERY [conn149] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 16.0 } cursorid:2298510139792 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:32 nreturned:199 reslen:11761 locks:{ Global: { acquireCount: { r: 66 } }, Database: { acquireCount: { r: 33 } }, Collection: { acquireCount: { r: 33 } } } 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.316-0400 m31100| 2015-07-09T13:57:49.315-0400 I QUERY [conn141] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 4.0 } cursorid:2298593075036 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:31 nreturned:199 reslen:11761 locks:{ Global: { acquireCount: { r: 64 } }, Database: { acquireCount: { r: 32 } }, Collection: { acquireCount: { r: 32 } } } 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.325-0400 m31100| 2015-07-09T13:57:49.324-0400 I QUERY [conn135] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 18.0 } cursorid:2298215751687 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:31 nreturned:214 reslen:12646 locks:{ Global: { acquireCount: { r: 64 } }, Database: { acquireCount: { r: 32 } }, Collection: { acquireCount: { r: 32 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.333-0400 m31100| 2015-07-09T13:57:49.332-0400 I QUERY [conn74] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 15.0 } cursorid:2299667786726 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:29 
nreturned:214 reslen:12646 locks:{ Global: { acquireCount: { r: 60 } }, Database: { acquireCount: { r: 30 } }, Collection: { acquireCount: { r: 30 } } } 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.355-0400 m31100| 2015-07-09T13:57:49.355-0400 I QUERY [conn45] query db28.coll28 query: { query: { indexed_insert_ordered_bulk: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN cursorid:2297937172516 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:5664 keyUpdates:0 writeConflicts:0 numYields:44 nreturned:101 reslen:5979 locks:{ Global: { acquireCount: { r: 90 } }, Database: { acquireCount: { r: 45 } }, Collection: { acquireCount: { r: 45 } } } 158ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.378-0400 m31100| 2015-07-09T13:57:49.377-0400 I QUERY [conn143] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 0.0 } cursorid:2299408516233 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:37 nreturned:229 reslen:13531 locks:{ Global: { acquireCount: { r: 76 } }, Database: { acquireCount: { r: 38 } }, Collection: { acquireCount: { r: 38 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.391-0400 m31100| 2015-07-09T13:57:49.390-0400 I QUERY [conn134] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 3.0 } cursorid:2298202894549 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:35 nreturned:229 reslen:13531 locks:{ Global: { acquireCount: { r: 72 } }, Database: { acquireCount: { r: 36 } }, Collection: { acquireCount: { r: 36 } } } 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.394-0400 m31100| 2015-07-09T13:57:49.393-0400 I QUERY [conn138] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 7.0 } cursorid:2298928606949 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:37 nreturned:229 reslen:13531 locks:{ Global: { acquireCount: { r: 76 } }, Database: { acquireCount: { r: 38 } }, Collection: { acquireCount: { r: 38 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.475-0400 m30998| 2015-07-09T13:57:49.475-0400 I NETWORK [conn183] end connection 127.0.0.1:63243 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.486-0400 m30998| 2015-07-09T13:57:49.485-0400 I NETWORK [conn177] end connection 127.0.0.1:63230 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.490-0400 m31100| 2015-07-09T13:57:49.488-0400 I QUERY [conn137] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 9.0 } cursorid:2297912025361 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:33 nreturned:199 reslen:11761 locks:{ Global: { acquireCount: { r: 68 } }, Database: { acquireCount: { r: 34 } }, Collection: { acquireCount: { r: 34 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.505-0400 m31100| 2015-07-09T13:57:49.504-0400 I QUERY [conn86] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 18.0 } cursorid:2299822542256 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:33 nreturned:229 reslen:13531 locks:{ Global: { acquireCount: { r: 68 } }, Database: { acquireCount: { r: 34 } }, Collection: { acquireCount: { r: 34 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.511-0400 m31100| 2015-07-09T13:57:49.510-0400 I QUERY [conn44] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 16.0 } cursorid:2298079738366 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:34 nreturned:214 
reslen:12646 locks:{ Global: { acquireCount: { r: 70 } }, Database: { acquireCount: { r: 35 } }, Collection: { acquireCount: { r: 35 } } } 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.511-0400 m31100| 2015-07-09T13:57:49.511-0400 I QUERY [conn143] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 4.0 } cursorid:2298862019933 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:34 nreturned:214 reslen:12646 locks:{ Global: { acquireCount: { r: 70 } }, Database: { acquireCount: { r: 35 } }, Collection: { acquireCount: { r: 35 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.516-0400 m31100| 2015-07-09T13:57:49.515-0400 I QUERY [conn139] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 15.0 } cursorid:2299287617066 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:31 nreturned:229 reslen:13531 locks:{ Global: { acquireCount: { r: 64 } }, Database: { acquireCount: { r: 32 } }, Collection: { acquireCount: { r: 32 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.534-0400 m31100| 2015-07-09T13:57:49.533-0400 I QUERY [conn59] query db28.coll28 query: { query: { indexed_insert_ordered_bulk: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN cursorid:2298397244005 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:5664 keyUpdates:0 writeConflicts:0 numYields:44 nreturned:101 reslen:5979 locks:{ Global: { acquireCount: { r: 90 } }, Database: { acquireCount: { r: 45 } }, Collection: { acquireCount: { r: 45 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.555-0400 m31100| 2015-07-09T13:57:49.555-0400 I QUERY [conn135] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 0.0 } cursorid:2299215531826 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:39 nreturned:244 reslen:14416 locks:{ Global: { acquireCount: { r: 80 } }, Database: { acquireCount: { r: 40 } }, Collection: { acquireCount: { r: 40 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.556-0400 m31100| 2015-07-09T13:57:49.555-0400 I QUERY [conn140] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 1.0 } cursorid:2299457294965 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:33 nreturned:184 reslen:10876 locks:{ Global: { acquireCount: { r: 68 } }, Database: { acquireCount: { r: 34 } }, Collection: { acquireCount: { r: 34 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.563-0400 m31100| 2015-07-09T13:57:49.562-0400 I QUERY [conn148] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 3.0 } cursorid:2298491363834 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:37 nreturned:244 reslen:14416 locks:{ Global: { acquireCount: { r: 76 } }, Database: { acquireCount: { r: 38 } }, Collection: { acquireCount: { r: 38 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.565-0400 m31100| 2015-07-09T13:57:49.565-0400 I QUERY [conn74] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 7.0 } cursorid:2297984215239 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:39 nreturned:244 reslen:14416 locks:{ Global: { acquireCount: { r: 80 } }, Database: { acquireCount: { r: 40 } }, Collection: { acquireCount: { r: 40 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.634-0400 m30999| 2015-07-09T13:57:49.634-0400 I NETWORK [conn182] end connection 127.0.0.1:63242 (10 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T13:57:49.658-0400 m31100| 2015-07-09T13:57:49.657-0400 I QUERY [conn135] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 4.0 } cursorid:2299714855700 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:36 nreturned:229 reslen:13531 locks:{ Global: { acquireCount: { r: 74 } }, Database: { acquireCount: { r: 37 } }, Collection: { acquireCount: { r: 37 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.662-0400 m31100| 2015-07-09T13:57:49.662-0400 I QUERY [conn60] query db28.coll28 query: { query: { indexed_insert_ordered_bulk: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN cursorid:2299356839340 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:5664 keyUpdates:0 writeConflicts:0 numYields:44 nreturned:101 reslen:5979 locks:{ Global: { acquireCount: { r: 90 } }, Database: { acquireCount: { r: 45 } }, Collection: { acquireCount: { r: 45 } } } 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.694-0400 m31100| 2015-07-09T13:57:49.693-0400 I QUERY [conn141] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 0.0 } cursorid:2298585347310 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:41 nreturned:259 reslen:15301 locks:{ Global: { acquireCount: { r: 84 } }, Database: { acquireCount: { r: 42 } }, Collection: { acquireCount: { r: 42 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.724-0400 m30999| 2015-07-09T13:57:49.724-0400 I NETWORK [conn186] end connection 127.0.0.1:63248 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.752-0400 m30998| 2015-07-09T13:57:49.752-0400 I NETWORK [conn186] end connection 127.0.0.1:63250 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.765-0400 m30998| 2015-07-09T13:57:49.765-0400 I NETWORK [conn178] end connection 127.0.0.1:63233 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.795-0400 m31100| 2015-07-09T13:57:49.795-0400 I QUERY [conn20] query db28.coll28 query: { query: { indexed_insert_ordered_bulk: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN cursorid:2299121898579 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:5664 keyUpdates:0 writeConflicts:0 numYields:44 nreturned:101 reslen:5979 locks:{ Global: { acquireCount: { r: 90 } }, Database: { acquireCount: { r: 45 } }, Collection: { acquireCount: { r: 45 } } } 106ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.810-0400 m31100| 2015-07-09T13:57:49.801-0400 I QUERY [conn86] getmore db28.coll28 query: { indexed_insert_ordered_bulk: 4.0 } cursorid:2299487881414 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:38 nreturned:244 reslen:14416 locks:{ Global: { acquireCount: { r: 78 } }, Database: { acquireCount: { r: 39 } }, Collection: { acquireCount: { r: 39 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.816-0400 m30999| 2015-07-09T13:57:49.816-0400 I NETWORK [conn179] end connection 127.0.0.1:63236 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.843-0400 m30999| 2015-07-09T13:57:49.843-0400 I NETWORK [conn180] end connection 127.0.0.1:63239 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.857-0400 m30998| 2015-07-09T13:57:49.856-0400 I NETWORK [conn180] end connection 127.0.0.1:63237 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.864-0400 m30999| 2015-07-09T13:57:49.862-0400 I NETWORK [conn177] end connection 
127.0.0.1:63231 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.918-0400 m30999| 2015-07-09T13:57:49.918-0400 I NETWORK [conn178] end connection 127.0.0.1:63232 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.941-0400 m30998| 2015-07-09T13:57:49.939-0400 I NETWORK [conn181] end connection 127.0.0.1:63238 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.954-0400 m30998| 2015-07-09T13:57:49.954-0400 I NETWORK [conn182] end connection 127.0.0.1:63241 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:49.987-0400 m30999| 2015-07-09T13:57:49.985-0400 I NETWORK [conn184] end connection 127.0.0.1:63245 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.004-0400 m30999| 2015-07-09T13:57:50.004-0400 I NETWORK [conn183] end connection 127.0.0.1:63244 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.014-0400 m30999| 2015-07-09T13:57:50.013-0400 I NETWORK [conn181] end connection 127.0.0.1:63240 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.098-0400 m30998| 2015-07-09T13:57:50.098-0400 I NETWORK [conn184] end connection 127.0.0.1:63247 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.109-0400 m30998| 2015-07-09T13:57:50.108-0400 I NETWORK [conn179] end connection 127.0.0.1:63235 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.115-0400 m30998| 2015-07-09T13:57:50.115-0400 I NETWORK [conn185] end connection 127.0.0.1:63249 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.315-0400 m30999| 2015-07-09T13:57:50.315-0400 I NETWORK [conn185] end connection 127.0.0.1:63246 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.335-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js: Workload completed in 3432 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 m30999| 2015-07-09T13:57:50.336-0400 I COMMAND [conn1] DROP: db28.coll28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.336-0400 m30999| 2015-07-09T13:57:50.336-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:50.336-0400-559eb61eca4787b9985d1ca9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464670336), what: "dropCollection.start", ns: "db28.coll28", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.393-0400 m30999| 2015-07-09T13:57:50.392-0400 I SHARDING [conn1] distributed lock 'db28.coll28/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb61eca4787b9985d1caa [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.394-0400 m31100| 2015-07-09T13:57:50.393-0400 I COMMAND [conn40] CMD: drop db28.coll28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.397-0400 m31200| 2015-07-09T13:57:50.397-0400 I COMMAND [conn18] CMD: drop db28.coll28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.398-0400 m31101| 2015-07-09T13:57:50.397-0400 I 
COMMAND [repl writer worker 11] CMD: drop db28.coll28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.400-0400 m31102| 2015-07-09T13:57:50.398-0400 I COMMAND [repl writer worker 13] CMD: drop db28.coll28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.409-0400 m31202| 2015-07-09T13:57:50.409-0400 I COMMAND [repl writer worker 5] CMD: drop db28.coll28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.410-0400 m31201| 2015-07-09T13:57:50.410-0400 I COMMAND [repl writer worker 11] CMD: drop db28.coll28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.457-0400 m31100| 2015-07-09T13:57:50.456-0400 I SHARDING [conn40] remotely refreshing metadata for db28.coll28 with requested shard version 0|0||000000000000000000000000, current shard version is 1|19||559eb61aca4787b9985d1ca7, current metadata version is 1|19||559eb61aca4787b9985d1ca7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.458-0400 m31100| 2015-07-09T13:57:50.458-0400 W SHARDING [conn40] no chunks found when reloading db28.coll28, previous version was 0|0||559eb61aca4787b9985d1ca7, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.458-0400 m31100| 2015-07-09T13:57:50.458-0400 I SHARDING [conn40] dropping metadata for db28.coll28 at shard version 1|19||559eb61aca4787b9985d1ca7, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.460-0400 m30999| 2015-07-09T13:57:50.459-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:50.459-0400-559eb61eca4787b9985d1cab", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464670459), what: "dropCollection", ns: "db28.coll28", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.514-0400 m30999| 2015-07-09T13:57:50.513-0400 I SHARDING [conn1] distributed lock 'db28.coll28/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.569-0400 m30999| 2015-07-09T13:57:50.569-0400 I COMMAND [conn1] DROP DATABASE: db28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.569-0400 m30999| 2015-07-09T13:57:50.569-0400 I SHARDING [conn1] DBConfig::dropDatabase: db28 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.569-0400 m30999| 2015-07-09T13:57:50.569-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:50.569-0400-559eb61eca4787b9985d1cac", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464670569), what: "dropDatabase.start", ns: "db28", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.675-0400 m30999| 2015-07-09T13:57:50.674-0400 I SHARDING [conn1] DBConfig::dropDatabase: db28 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.676-0400 m31100| 2015-07-09T13:57:50.675-0400 I COMMAND [conn28] dropDatabase db28 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.676-0400 m31100| 2015-07-09T13:57:50.676-0400 I COMMAND [conn28] dropDatabase db28 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.677-0400 m30999| 2015-07-09T13:57:50.676-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:50.676-0400-559eb61eca4787b9985d1cad", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464670676), what: "dropDatabase", ns: "db28", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.677-0400 m31102| 2015-07-09T13:57:50.677-0400 I COMMAND [repl writer worker 4] dropDatabase db28 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.677-0400 m31102| 2015-07-09T13:57:50.677-0400 I COMMAND [repl writer worker 4] dropDatabase db28 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.677-0400 m31101| 2015-07-09T13:57:50.677-0400 I COMMAND [repl writer worker 9] dropDatabase db28 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.678-0400 m31101| 2015-07-09T13:57:50.677-0400 I COMMAND [repl writer worker 9] dropDatabase db28 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.766-0400 m31100| 2015-07-09T13:57:50.765-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.769-0400 m31102| 2015-07-09T13:57:50.769-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.769-0400 m31101| 2015-07-09T13:57:50.769-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.811-0400 m31200| 2015-07-09T13:57:50.810-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.814-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.814-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.814-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.814-0400 jstests/concurrency/fsm_workloads/yield_rooted_or.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.814-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.815-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.815-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.821-0400 m31201| 2015-07-09T13:57:50.820-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.821-0400 
m30999| 2015-07-09T13:57:50.821-0400 I SHARDING [conn1] distributed lock 'db29/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb61eca4787b9985d1cae [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.822-0400 m31202| 2015-07-09T13:57:50.822-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.826-0400 m30999| 2015-07-09T13:57:50.825-0400 I SHARDING [conn1] Placing [db29] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.826-0400 m30999| 2015-07-09T13:57:50.826-0400 I SHARDING [conn1] Enabling sharding for database [db29] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.880-0400 m30999| 2015-07-09T13:57:50.879-0400 I SHARDING [conn1] distributed lock 'db29/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.903-0400 m31100| 2015-07-09T13:57:50.903-0400 I INDEX [conn29] build index on: db29.coll29 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.904-0400 m31100| 2015-07-09T13:57:50.903-0400 I INDEX [conn29] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.913-0400 m31100| 2015-07-09T13:57:50.913-0400 I INDEX [conn29] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.915-0400 m30999| 2015-07-09T13:57:50.914-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db29.coll29", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.920-0400 m30999| 2015-07-09T13:57:50.920-0400 I SHARDING [conn1] distributed lock 'db29.coll29/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb61eca4787b9985d1caf [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.922-0400 m30999| 2015-07-09T13:57:50.921-0400 I SHARDING [conn1] enable sharding on: db29.coll29 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.922-0400 m30999| 2015-07-09T13:57:50.921-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:50.921-0400-559eb61eca4787b9985d1cb0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464670921), what: "shardCollection.start", ns: "db29.coll29", details: { shardKey: { _id: "hashed" }, collection: "db29.coll29", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.929-0400 m31102| 2015-07-09T13:57:50.929-0400 I INDEX [repl writer worker 6] build index on: db29.coll29 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.929-0400 m31102| 2015-07-09T13:57:50.929-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.936-0400 m31101| 2015-07-09T13:57:50.936-0400 I INDEX [repl writer worker 8] build index on: db29.coll29 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.937-0400 m31101| 2015-07-09T13:57:50.936-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.938-0400 m31102| 2015-07-09T13:57:50.937-0400 I INDEX [repl writer worker 6] build index done. 
scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.943-0400 m31101| 2015-07-09T13:57:50.942-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:50.975-0400 m30999| 2015-07-09T13:57:50.974-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db29.coll29 using new epoch 559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.085-0400 m30999| 2015-07-09T13:57:51.084-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db29.coll29: 1ms sequenceNumber: 130 version: 1|1||559eb61eca4787b9985d1cb1 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.140-0400 m30999| 2015-07-09T13:57:51.139-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db29.coll29: 0ms sequenceNumber: 131 version: 1|1||559eb61eca4787b9985d1cb1 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.142-0400 m31100| 2015-07-09T13:57:51.141-0400 I SHARDING [conn52] remotely refreshing metadata for db29.coll29 with requested shard version 1|1||559eb61eca4787b9985d1cb1, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.143-0400 m31100| 2015-07-09T13:57:51.143-0400 I SHARDING [conn52] collection db29.coll29 was previously unsharded, new metadata loaded with shard version 1|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.144-0400 m31100| 2015-07-09T13:57:51.143-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb61eca4787b9985d1cb1, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.144-0400 m30999| 2015-07-09T13:57:51.143-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:51.143-0400-559eb61fca4787b9985d1cb2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464671143), what: "shardCollection", ns: "db29.coll29", details: { version: "1|1||559eb61eca4787b9985d1cb1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.199-0400 m30999| 2015-07-09T13:57:51.198-0400 I SHARDING [conn1] distributed lock 'db29.coll29/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.200-0400 m30999| 2015-07-09T13:57:51.199-0400 I SHARDING [conn1] moving chunk ns: db29.coll29 moving ( ns: db29.coll29, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.200-0400 m31100| 2015-07-09T13:57:51.200-0400 I SHARDING [conn40] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.201-0400 m31100| 2015-07-09T13:57:51.201-0400 I SHARDING [conn40] received moveChunk request: { moveChunk: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb61eca4787b9985d1cb1') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.205-0400 m31100| 2015-07-09T13:57:51.204-0400 I SHARDING [conn40] distributed lock 'db29.coll29/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb61f792e00bb67274962 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.205-0400 m31100| 2015-07-09T13:57:51.205-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:51.205-0400-559eb61f792e00bb67274963", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464671205), what: "moveChunk.start", ns: "db29.coll29", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.259-0400 m31100| 2015-07-09T13:57:51.259-0400 I SHARDING [conn40] remotely refreshing metadata for db29.coll29 based on current shard version 1|1||559eb61eca4787b9985d1cb1, current metadata version is 1|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.261-0400 m31100| 2015-07-09T13:57:51.260-0400 I SHARDING [conn40] metadata of collection db29.coll29 already up to date (shard version : 1|1||559eb61eca4787b9985d1cb1, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.261-0400 m31100| 2015-07-09T13:57:51.261-0400 I SHARDING [conn40] moveChunk request accepted at version 1|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.261-0400 m31100| 2015-07-09T13:57:51.261-0400 I SHARDING [conn40] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.262-0400 m31200| 2015-07-09T13:57:51.261-0400 I SHARDING [conn16] remotely refreshing metadata for db29.coll29, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.263-0400 m31200| 2015-07-09T13:57:51.263-0400 I SHARDING [conn16] collection db29.coll29 was previously unsharded, new metadata loaded with shard version 0|0||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.263-0400 m31200| 2015-07-09T13:57:51.263-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb61eca4787b9985d1cb1, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.264-0400 m31200| 2015-07-09T13:57:51.263-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db29.coll29 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.266-0400 m31100| 2015-07-09T13:57:51.265-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.269-0400 m31100| 2015-07-09T13:57:51.268-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.274-0400 m31100| 2015-07-09T13:57:51.273-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.282-0400 m31200| 2015-07-09T13:57:51.282-0400 I INDEX [migrateThread] build index on: db29.coll29 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.282-0400 m31200| 2015-07-09T13:57:51.282-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.283-0400 m31100| 2015-07-09T13:57:51.283-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.294-0400 m31200| 2015-07-09T13:57:51.294-0400 I INDEX [migrateThread] build index on: db29.coll29 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.295-0400 m31200| 2015-07-09T13:57:51.294-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.301-0400 m31100| 2015-07-09T13:57:51.300-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.310-0400 m31200| 2015-07-09T13:57:51.309-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.311-0400 m31200| 2015-07-09T13:57:51.310-0400 I SHARDING [migrateThread] Deleter starting delete for: db29.coll29 from { _id: 0 } -> { _id: MaxKey }, with opId: 43646 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.311-0400 m31200| 2015-07-09T13:57:51.311-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db29.coll29 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.326-0400 m31201| 2015-07-09T13:57:51.326-0400 I INDEX [repl writer worker 9] build index on: db29.coll29 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.327-0400 m31201| 2015-07-09T13:57:51.326-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.331-0400 m31202| 2015-07-09T13:57:51.330-0400 I INDEX [repl writer worker 2] build index on: db29.coll29 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.331-0400 m31202| 2015-07-09T13:57:51.331-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.334-0400 m31100| 2015-07-09T13:57:51.334-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.339-0400 m31201| 2015-07-09T13:57:51.338-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.341-0400 m31200| 2015-07-09T13:57:51.341-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.341-0400 m31200| 2015-07-09T13:57:51.341-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db29.coll29' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.342-0400 m31202| 2015-07-09T13:57:51.342-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.400-0400 m31100| 2015-07-09T13:57:51.399-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.400-0400 m31100| 2015-07-09T13:57:51.399-0400 I SHARDING [conn40] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.400-0400 m31100| 2015-07-09T13:57:51.400-0400 I SHARDING [conn40] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.400-0400 m31100| 2015-07-09T13:57:51.400-0400 I SHARDING [conn40] moveChunk setting version to: 2|0||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.411-0400 m31200| 2015-07-09T13:57:51.411-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db29.coll29' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.411-0400 m31200| 2015-07-09T13:57:51.411-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:51.411-0400-559eb61fd5a107a5b9c0db22", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464671411), what: "moveChunk.to", ns: "db29.coll29", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 46, step 2 of 5: 29, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 70, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.465-0400 m31100| 2015-07-09T13:57:51.464-0400 I SHARDING [conn40] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.465-0400 m31100| 2015-07-09T13:57:51.464-0400 I SHARDING [conn40] moveChunk updating self version to: 2|1||559eb61eca4787b9985d1cb1 through { _id: MinKey } -> { _id: 0 } for collection 'db29.coll29' [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.467-0400 m31100| 2015-07-09T13:57:51.466-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:51.466-0400-559eb61f792e00bb67274964", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464671466), what: "moveChunk.commit", ns: "db29.coll29", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.520-0400 m31100| 2015-07-09T13:57:51.520-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.521-0400 m31100| 2015-07-09T13:57:51.520-0400 I SHARDING [conn40] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.521-0400 m31100| 2015-07-09T13:57:51.520-0400 I SHARDING [conn40] Deleter starting delete for: db29.coll29 from { _id: 0 } -> { _id: MaxKey }, with opId: 37630 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:57:51.521-0400 m31100| 2015-07-09T13:57:51.520-0400 I SHARDING [conn40] rangeDeleter deleted 0 documents for db29.coll29 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.521-0400 m31100| 2015-07-09T13:57:51.520-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.522-0400 m31100| 2015-07-09T13:57:51.522-0400 I SHARDING [conn40] distributed lock 'db29.coll29/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.523-0400 m31100| 2015-07-09T13:57:51.522-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:51.522-0400-559eb61f792e00bb67274965", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464671522), what: "moveChunk.from", ns: "db29.coll29", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 2, step 4 of 6: 135, step 5 of 6: 120, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.576-0400 m31100| 2015-07-09T13:57:51.575-0400 I COMMAND [conn40] command db29.coll29 command: moveChunk { moveChunk: "db29.coll29", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb61eca4787b9985d1cb1') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 374ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.578-0400 m30999| 2015-07-09T13:57:51.577-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db29.coll29: 0ms sequenceNumber: 132 version: 2|1||559eb61eca4787b9985d1cb1 based on: 1|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.579-0400 m31100| 2015-07-09T13:57:51.579-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db29.coll29", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61eca4787b9985d1cb1') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.585-0400 m31100| 2015-07-09T13:57:51.585-0400 I SHARDING [conn40] distributed lock 'db29.coll29/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb61f792e00bb67274966 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.586-0400 m31100| 2015-07-09T13:57:51.585-0400 I SHARDING [conn40] remotely refreshing metadata for db29.coll29 based on current shard version 2|0||559eb61eca4787b9985d1cb1, current metadata version is 2|0||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.587-0400 m31100| 2015-07-09T13:57:51.587-0400 I SHARDING [conn40] updating metadata for db29.coll29 from shard version 2|0||559eb61eca4787b9985d1cb1 to shard version 2|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.588-0400 m31100| 2015-07-09T13:57:51.587-0400 I 
SHARDING [conn40] collection version was loaded at version 2|1||559eb61eca4787b9985d1cb1, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.588-0400 m31100| 2015-07-09T13:57:51.587-0400 I SHARDING [conn40] splitChunk accepted at version 2|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.589-0400 m31100| 2015-07-09T13:57:51.588-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:51.588-0400-559eb61f792e00bb67274967", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464671588), what: "split", ns: "db29.coll29", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb61eca4787b9985d1cb1') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb61eca4787b9985d1cb1') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.642-0400 m31100| 2015-07-09T13:57:51.642-0400 I SHARDING [conn40] distributed lock 'db29.coll29/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.645-0400 m30999| 2015-07-09T13:57:51.644-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db29.coll29: 0ms sequenceNumber: 133 version: 2|3||559eb61eca4787b9985d1cb1 based on: 2|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.645-0400 m31200| 2015-07-09T13:57:51.645-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db29.coll29", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb61eca4787b9985d1cb1') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.650-0400 m31200| 2015-07-09T13:57:51.649-0400 I SHARDING [conn18] distributed lock 'db29.coll29/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb61fd5a107a5b9c0db23 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.650-0400 m31200| 2015-07-09T13:57:51.650-0400 I SHARDING [conn18] remotely refreshing metadata for db29.coll29 based on current shard version 0|0||559eb61eca4787b9985d1cb1, current metadata version is 1|1||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.652-0400 m31200| 2015-07-09T13:57:51.651-0400 I SHARDING [conn18] updating metadata for db29.coll29 from shard version 0|0||559eb61eca4787b9985d1cb1 to shard version 2|0||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.652-0400 m31200| 2015-07-09T13:57:51.652-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb61eca4787b9985d1cb1, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.652-0400 m31200| 2015-07-09T13:57:51.652-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.655-0400 m31200| 2015-07-09T13:57:51.654-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:57:51.654-0400-559eb61fd5a107a5b9c0db24", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464671654), what: "split", ns: "db29.coll29", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb61eca4787b9985d1cb1') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb61eca4787b9985d1cb1') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.710-0400 m31200| 2015-07-09T13:57:51.709-0400 I SHARDING [conn18] distributed lock 'db29.coll29/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.712-0400 m30999| 2015-07-09T13:57:51.711-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db29.coll29: 0ms sequenceNumber: 134 version: 2|5||559eb61eca4787b9985d1cb1 based on: 2|3||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.860-0400 m31200| 2015-07-09T13:57:51.859-0400 I COMMAND [conn23] command db29.$cmd command: update { update: "coll29", updates: 109, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eb61eca4787b9985d1cb1') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:3751 locks:{ Global: { acquireCount: { r: 218, w: 218 } }, Database: { acquireCount: { w: 218 } }, Collection: { acquireCount: { w: 109 } }, Metadata: { acquireCount: { w: 109 } }, oplog: { acquireCount: { w: 109 } } } protocol:op_command 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.871-0400 m31100| 2015-07-09T13:57:51.870-0400 I INDEX [conn52] build index on: db29.coll29 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.871-0400 m31100| 2015-07-09T13:57:51.871-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.880-0400 m31200| 2015-07-09T13:57:51.879-0400 I INDEX [conn32] build index on: db29.coll29 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.881-0400 m31200| 2015-07-09T13:57:51.880-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.883-0400 m31100| 2015-07-09T13:57:51.883-0400 I INDEX [conn52] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.886-0400 m31200| 2015-07-09T13:57:51.886-0400 I INDEX [conn32] build index done. scanned 109 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.902-0400 m31202| 2015-07-09T13:57:51.901-0400 I INDEX [repl writer worker 2] build index on: db29.coll29 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.902-0400 m31201| 2015-07-09T13:57:51.901-0400 I INDEX [repl writer worker 13] build index on: db29.coll29 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.902-0400 m31202| 2015-07-09T13:57:51.901-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.903-0400 m31101| 2015-07-09T13:57:51.901-0400 I INDEX [repl writer worker 5] build index on: db29.coll29 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.903-0400 m31201| 2015-07-09T13:57:51.901-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.904-0400 m31101| 2015-07-09T13:57:51.901-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.905-0400 m31102| 2015-07-09T13:57:51.904-0400 I INDEX [repl writer worker 0] build index on: db29.coll29 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.905-0400 m31100| 2015-07-09T13:57:51.904-0400 I INDEX [conn52] build index on: db29.coll29 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.905-0400 m31100| 2015-07-09T13:57:51.904-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.905-0400 m31102| 2015-07-09T13:57:51.904-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.907-0400 m31200| 2015-07-09T13:57:51.907-0400 I INDEX [conn32] build index on: db29.coll29 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.907-0400 m31200| 2015-07-09T13:57:51.907-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.914-0400 m31202| 2015-07-09T13:57:51.914-0400 I INDEX [repl writer worker 2] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.914-0400 m31201| 2015-07-09T13:57:51.914-0400 I INDEX [repl writer worker 13] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.918-0400 m31100| 2015-07-09T13:57:51.917-0400 I INDEX [conn52] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.918-0400 m31101| 2015-07-09T13:57:51.917-0400 I INDEX [repl writer worker 5] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.920-0400 m31102| 2015-07-09T13:57:51.920-0400 I INDEX [repl writer worker 0] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.924-0400 m31200| 2015-07-09T13:57:51.924-0400 I INDEX [conn32] build index done. scanned 109 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.926-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.937-0400 m31101| 2015-07-09T13:57:51.936-0400 I INDEX [repl writer worker 1] build index on: db29.coll29 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.937-0400 m31101| 2015-07-09T13:57:51.936-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.963-0400 m31102| 2015-07-09T13:57:51.962-0400 I INDEX [repl writer worker 13] build index on: db29.coll29 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.963-0400 m31102| 2015-07-09T13:57:51.962-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.968-0400 m31101| 2015-07-09T13:57:51.967-0400 I INDEX [repl writer worker 1] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.969-0400 m31202| 2015-07-09T13:57:51.968-0400 I INDEX [repl writer worker 11] build index on: db29.coll29 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.969-0400 m31202| 2015-07-09T13:57:51.968-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.973-0400 m31201| 2015-07-09T13:57:51.973-0400 I INDEX [repl writer worker 1] build index on: db29.coll29 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db29.coll29" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.973-0400 m31201| 2015-07-09T13:57:51.973-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.980-0400 m31201| 2015-07-09T13:57:51.980-0400 I INDEX [repl writer worker 1] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.994-0400 m31102| 2015-07-09T13:57:51.991-0400 I INDEX [repl writer worker 13] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:51.995-0400 m31202| 2015-07-09T13:57:51.993-0400 I INDEX [repl writer worker 11] build index done. scanned 109 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.009-0400 m30999| 2015-07-09T13:57:52.008-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63268 #187 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.009-0400 m30999| 2015-07-09T13:57:52.008-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63269 #188 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.009-0400 m30999| 2015-07-09T13:57:52.009-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63270 #189 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.010-0400 m30998| 2015-07-09T13:57:52.010-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63271 #187 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.014-0400 m30998| 2015-07-09T13:57:52.012-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63272 #188 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.024-0400 setting random seed: 240422342903 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.024-0400 setting random seed: 9770062528550 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.024-0400 setting random seed: 5440993811935 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.025-0400 setting random seed: 6783039774745 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.025-0400 setting random seed: 2549547236412 [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:52.028-0400 m30998| 2015-07-09T13:57:52.027-0400 I SHARDING [conn188] ChunkManager: time to load chunks for db29.coll29: 0ms sequenceNumber: 34 version: 2|5||559eb61eca4787b9985d1cb1 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:58.923-0400 m30999| 2015-07-09T13:57:58.921-0400 I NETWORK [conn187] end connection 127.0.0.1:63268 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:57:59.784-0400 m30999| 2015-07-09T13:57:59.784-0400 I NETWORK [conn189] end connection 127.0.0.1:63270 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.451-0400 m30998| 2015-07-09T13:58:01.451-0400 I NETWORK [conn187] end connection 127.0.0.1:63271 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.512-0400 m30998| 2015-07-09T13:58:01.512-0400 I NETWORK [conn188] end connection 127.0.0.1:63272 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.576-0400 m30999| 2015-07-09T13:58:01.575-0400 I NETWORK [conn188] end connection 127.0.0.1:63269 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.591-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.591-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.591-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.591-0400 jstests/concurrency/fsm_workloads/yield_rooted_or.js: Workload completed in 9649 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.592-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.592-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.592-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.592-0400 m30999| 2015-07-09T13:58:01.591-0400 I COMMAND [conn1] DROP: db29.coll29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.592-0400 m30999| 2015-07-09T13:58:01.592-0400 I SHARDING [conn1] about to log metadata event: { _id: 
"bs-osx108-8-2015-07-09T13:58:01.592-0400-559eb629ca4787b9985d1cb3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464681592), what: "dropCollection.start", ns: "db29.coll29", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.651-0400 m30999| 2015-07-09T13:58:01.651-0400 I SHARDING [conn1] distributed lock 'db29.coll29/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb629ca4787b9985d1cb4 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.653-0400 m31100| 2015-07-09T13:58:01.652-0400 I COMMAND [conn40] CMD: drop db29.coll29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.656-0400 m31200| 2015-07-09T13:58:01.656-0400 I COMMAND [conn18] CMD: drop db29.coll29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.657-0400 m31102| 2015-07-09T13:58:01.657-0400 I COMMAND [repl writer worker 5] CMD: drop db29.coll29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.657-0400 m31101| 2015-07-09T13:58:01.657-0400 I COMMAND [repl writer worker 10] CMD: drop db29.coll29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.663-0400 m31201| 2015-07-09T13:58:01.663-0400 I COMMAND [repl writer worker 7] CMD: drop db29.coll29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.665-0400 m31202| 2015-07-09T13:58:01.665-0400 I COMMAND [repl writer worker 8] CMD: drop db29.coll29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.717-0400 m31100| 2015-07-09T13:58:01.717-0400 I SHARDING [conn40] remotely refreshing metadata for db29.coll29 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb61eca4787b9985d1cb1, current metadata version is 2|3||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.719-0400 m31100| 2015-07-09T13:58:01.719-0400 W SHARDING [conn40] no chunks found when reloading db29.coll29, previous version was 0|0||559eb61eca4787b9985d1cb1, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.719-0400 m31100| 2015-07-09T13:58:01.719-0400 I SHARDING [conn40] dropping metadata for db29.coll29 at shard version 2|3||559eb61eca4787b9985d1cb1, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.721-0400 m31200| 2015-07-09T13:58:01.721-0400 I SHARDING [conn18] remotely refreshing metadata for db29.coll29 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb61eca4787b9985d1cb1, current metadata version is 2|5||559eb61eca4787b9985d1cb1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.723-0400 m31200| 2015-07-09T13:58:01.723-0400 W SHARDING [conn18] no chunks found when reloading db29.coll29, previous version was 0|0||559eb61eca4787b9985d1cb1, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.723-0400 m31200| 2015-07-09T13:58:01.723-0400 I SHARDING [conn18] dropping metadata for db29.coll29 at shard version 2|5||559eb61eca4787b9985d1cb1, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.725-0400 m30999| 2015-07-09T13:58:01.724-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:01.724-0400-559eb629ca4787b9985d1cb5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464681724), what: "dropCollection", ns: "db29.coll29", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.779-0400 m30999| 2015-07-09T13:58:01.779-0400 I SHARDING [conn1] distributed lock 'db29.coll29/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.835-0400 m30999| 2015-07-09T13:58:01.835-0400 I COMMAND [conn1] DROP DATABASE: db29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.836-0400 m30999| 2015-07-09T13:58:01.835-0400 I SHARDING [conn1] DBConfig::dropDatabase: db29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.836-0400 m30999| 2015-07-09T13:58:01.835-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:01.835-0400-559eb629ca4787b9985d1cb6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464681835), what: "dropDatabase.start", ns: "db29", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.944-0400 m30999| 2015-07-09T13:58:01.943-0400 I SHARDING [conn1] DBConfig::dropDatabase: db29 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.944-0400 m31100| 2015-07-09T13:58:01.944-0400 I COMMAND [conn28] dropDatabase db29 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.945-0400 m31100| 2015-07-09T13:58:01.944-0400 I COMMAND [conn28] dropDatabase db29 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.945-0400 m30999| 2015-07-09T13:58:01.945-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:01.945-0400-559eb629ca4787b9985d1cb7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464681945), what: "dropDatabase", ns: "db29", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.946-0400 m31102| 2015-07-09T13:58:01.946-0400 I COMMAND [repl writer worker 14] dropDatabase db29 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.946-0400 m31102| 2015-07-09T13:58:01.946-0400 I COMMAND [repl writer worker 14] dropDatabase db29 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.947-0400 m31101| 2015-07-09T13:58:01.946-0400 I COMMAND [repl writer worker 3] dropDatabase db29 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:01.947-0400 m31101| 2015-07-09T13:58:01.946-0400 I COMMAND [repl writer worker 3] dropDatabase db29 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.048-0400 m31100| 2015-07-09T13:58:02.048-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.052-0400 m31102| 2015-07-09T13:58:02.052-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.052-0400 m31101| 2015-07-09T13:58:02.052-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.095-0400 m31200| 2015-07-09T13:58:02.094-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.097-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.098-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.098-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.098-0400 jstests/concurrency/fsm_workloads/collmod.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.098-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.098-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.098-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.099-0400 m31201| 2015-07-09T13:58:02.099-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.102-0400 
m31202| 2015-07-09T13:58:02.101-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.105-0400 m30999| 2015-07-09T13:58:02.104-0400 I SHARDING [conn1] distributed lock 'db30/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb62aca4787b9985d1cb8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.109-0400 m30999| 2015-07-09T13:58:02.109-0400 I SHARDING [conn1] Placing [db30] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.109-0400 m30999| 2015-07-09T13:58:02.109-0400 I SHARDING [conn1] Enabling sharding for database [db30] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.165-0400 m30999| 2015-07-09T13:58:02.165-0400 I SHARDING [conn1] distributed lock 'db30/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.190-0400 m31100| 2015-07-09T13:58:02.190-0400 I INDEX [conn69] build index on: db30.coll30 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db30.coll30" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.190-0400 m31100| 2015-07-09T13:58:02.190-0400 I INDEX [conn69] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.200-0400 m31100| 2015-07-09T13:58:02.200-0400 I INDEX [conn69] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.204-0400 m30999| 2015-07-09T13:58:02.201-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db30.coll30", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.206-0400 m30999| 2015-07-09T13:58:02.205-0400 I SHARDING [conn1] distributed lock 'db30.coll30/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb62aca4787b9985d1cb9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.206-0400 m30999| 2015-07-09T13:58:02.206-0400 I SHARDING [conn1] enable sharding on: db30.coll30 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.207-0400 m30999| 2015-07-09T13:58:02.206-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.206-0400-559eb62aca4787b9985d1cba", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464682206), what: "shardCollection.start", ns: "db30.coll30", details: { shardKey: { _id: "hashed" }, collection: "db30.coll30", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.212-0400 m31102| 2015-07-09T13:58:02.211-0400 I INDEX [repl writer worker 4] build index on: db30.coll30 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db30.coll30" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.212-0400 m31102| 2015-07-09T13:58:02.211-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.221-0400 m31101| 2015-07-09T13:58:02.220-0400 I INDEX [repl writer worker 5] build index on: db30.coll30 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db30.coll30" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.221-0400 m31101| 2015-07-09T13:58:02.220-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.222-0400 m31102| 2015-07-09T13:58:02.221-0400 I INDEX [repl writer worker 4] build index done. 
scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.225-0400 m31101| 2015-07-09T13:58:02.225-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.260-0400 m30999| 2015-07-09T13:58:02.259-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db30.coll30 using new epoch 559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.368-0400 m30999| 2015-07-09T13:58:02.367-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db30.coll30: 1ms sequenceNumber: 135 version: 1|1||559eb62aca4787b9985d1cbb based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.425-0400 m30999| 2015-07-09T13:58:02.424-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db30.coll30: 1ms sequenceNumber: 136 version: 1|1||559eb62aca4787b9985d1cbb based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.427-0400 m31100| 2015-07-09T13:58:02.426-0400 I SHARDING [conn52] remotely refreshing metadata for db30.coll30 with requested shard version 1|1||559eb62aca4787b9985d1cbb, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.429-0400 m31100| 2015-07-09T13:58:02.428-0400 I SHARDING [conn52] collection db30.coll30 was previously unsharded, new metadata loaded with shard version 1|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.429-0400 m31100| 2015-07-09T13:58:02.428-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb62aca4787b9985d1cbb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.429-0400 m30999| 2015-07-09T13:58:02.429-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.429-0400-559eb62aca4787b9985d1cbc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464682429), what: "shardCollection", ns: "db30.coll30", details: { version: "1|1||559eb62aca4787b9985d1cbb" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.484-0400 m30999| 2015-07-09T13:58:02.483-0400 I SHARDING [conn1] distributed lock 'db30.coll30/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.485-0400 m30999| 2015-07-09T13:58:02.485-0400 I SHARDING [conn1] moving chunk ns: db30.coll30 moving ( ns: db30.coll30, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.486-0400 m31100| 2015-07-09T13:58:02.485-0400 I SHARDING [conn40] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.487-0400 m31100| 2015-07-09T13:58:02.486-0400 I SHARDING [conn40] received moveChunk request: { moveChunk: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb62aca4787b9985d1cbb') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.492-0400 m31100| 2015-07-09T13:58:02.492-0400 I SHARDING [conn40] distributed lock 'db30.coll30/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb62a792e00bb67274969 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.493-0400 m31100| 2015-07-09T13:58:02.492-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.492-0400-559eb62a792e00bb6727496a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464682492), what: "moveChunk.start", ns: "db30.coll30", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.546-0400 m31100| 2015-07-09T13:58:02.545-0400 I SHARDING [conn40] remotely refreshing metadata for db30.coll30 based on current shard version 1|1||559eb62aca4787b9985d1cbb, current metadata version is 1|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.548-0400 m31100| 2015-07-09T13:58:02.547-0400 I SHARDING [conn40] metadata of collection db30.coll30 already up to date (shard version : 1|1||559eb62aca4787b9985d1cbb, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.548-0400 m31100| 2015-07-09T13:58:02.547-0400 I SHARDING [conn40] moveChunk request accepted at version 1|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.548-0400 m31100| 2015-07-09T13:58:02.548-0400 I SHARDING [conn40] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.549-0400 m31200| 2015-07-09T13:58:02.548-0400 I SHARDING [conn16] remotely refreshing metadata for db30.coll30, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.550-0400 m31200| 2015-07-09T13:58:02.550-0400 I SHARDING [conn16] collection db30.coll30 was previously unsharded, new metadata loaded with shard version 0|0||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.550-0400 m31200| 2015-07-09T13:58:02.550-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb62aca4787b9985d1cbb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.551-0400 m31200| 2015-07-09T13:58:02.550-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db30.coll30 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.552-0400 m31100| 2015-07-09T13:58:02.552-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.556-0400 m31100| 2015-07-09T13:58:02.555-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.562-0400 m31100| 2015-07-09T13:58:02.561-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.571-0400 m31100| 2015-07-09T13:58:02.571-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.573-0400 m31200| 2015-07-09T13:58:02.573-0400 I INDEX [migrateThread] build index on: db30.coll30 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db30.coll30" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.574-0400 m31200| 2015-07-09T13:58:02.573-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.585-0400 m31200| 2015-07-09T13:58:02.584-0400 I INDEX [migrateThread] build index on: db30.coll30 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db30.coll30" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.585-0400 m31200| 2015-07-09T13:58:02.585-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.589-0400 m31100| 2015-07-09T13:58:02.588-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.596-0400 m31200| 2015-07-09T13:58:02.596-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.597-0400 m31200| 2015-07-09T13:58:02.597-0400 I SHARDING [migrateThread] Deleter starting delete for: db30.coll30 from { _id: 0 } -> { _id: MaxKey }, with opId: 53860 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.602-0400 m31200| 2015-07-09T13:58:02.601-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db30.coll30 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.609-0400 m31202| 2015-07-09T13:58:02.609-0400 I INDEX [repl writer worker 1] build index on: db30.coll30 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db30.coll30" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.609-0400 m31202| 2015-07-09T13:58:02.609-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.612-0400 m31201| 2015-07-09T13:58:02.612-0400 I INDEX [repl writer worker 9] build index on: db30.coll30 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db30.coll30" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.613-0400 m31201| 2015-07-09T13:58:02.612-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.621-0400 m31202| 2015-07-09T13:58:02.620-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.623-0400 m31100| 2015-07-09T13:58:02.622-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.623-0400 m31201| 2015-07-09T13:58:02.622-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.623-0400 m31200| 2015-07-09T13:58:02.623-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.624-0400 m31200| 2015-07-09T13:58:02.623-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db30.coll30' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.689-0400 m31100| 2015-07-09T13:58:02.688-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.689-0400 m31100| 2015-07-09T13:58:02.688-0400 I SHARDING [conn40] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.690-0400 m31100| 2015-07-09T13:58:02.689-0400 I SHARDING [conn40] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.690-0400 m31100| 2015-07-09T13:58:02.690-0400 I SHARDING [conn40] moveChunk setting version to: 2|0||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.695-0400 m31200| 2015-07-09T13:58:02.695-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db30.coll30' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.696-0400 m31200| 2015-07-09T13:58:02.695-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.695-0400-559eb62ad5a107a5b9c0db25", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464682695), what: "moveChunk.to", ns: "db30.coll30", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 46, step 2 of 5: 25, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 71, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.750-0400 m31100| 2015-07-09T13:58:02.749-0400 I SHARDING [conn40] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.750-0400 m31100| 2015-07-09T13:58:02.749-0400 I SHARDING [conn40] moveChunk updating self version to: 2|1||559eb62aca4787b9985d1cbb through { _id: MinKey } -> { _id: 0 } for collection 'db30.coll30' [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.752-0400 m31100| 2015-07-09T13:58:02.751-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.751-0400-559eb62a792e00bb6727496b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464682751), what: "moveChunk.commit", ns: "db30.coll30", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.806-0400 m31100| 2015-07-09T13:58:02.805-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.806-0400 m31100| 2015-07-09T13:58:02.805-0400 I SHARDING [conn40] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.806-0400 m31100| 2015-07-09T13:58:02.805-0400 I SHARDING [conn40] Deleter starting delete for: db30.coll30 from { _id: 0 } -> { _id: MaxKey }, with opId: 47352 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.806-0400 m31100| 2015-07-09T13:58:02.805-0400 I SHARDING [conn40] rangeDeleter deleted 0 documents for db30.coll30 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.806-0400 m31100| 2015-07-09T13:58:02.805-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.807-0400 m31100| 2015-07-09T13:58:02.806-0400 I SHARDING [conn40] distributed lock 'db30.coll30/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.807-0400 m31100| 2015-07-09T13:58:02.807-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.806-0400-559eb62a792e00bb6727496c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464682806), what: "moveChunk.from", ns: "db30.coll30", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 61, step 3 of 6: 2, step 4 of 6: 138, step 5 of 6: 116, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.861-0400 m31100| 2015-07-09T13:58:02.860-0400 I COMMAND [conn40] command db30.coll30 command: moveChunk { moveChunk: "db30.coll30", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb62aca4787b9985d1cbb') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 375ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.864-0400 m30999| 2015-07-09T13:58:02.864-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db30.coll30: 1ms sequenceNumber: 137 version: 2|1||559eb62aca4787b9985d1cbb based on: 1|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.865-0400 m31100| 2015-07-09T13:58:02.865-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db30.coll30", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb62aca4787b9985d1cbb') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.869-0400 m31100| 2015-07-09T13:58:02.869-0400 I SHARDING [conn40] distributed lock 'db30.coll30/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb62a792e00bb6727496d [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.870-0400 m31100| 2015-07-09T13:58:02.869-0400 I SHARDING [conn40] remotely refreshing metadata for db30.coll30 based on current shard version 
2|0||559eb62aca4787b9985d1cbb, current metadata version is 2|0||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.871-0400 m31100| 2015-07-09T13:58:02.870-0400 I SHARDING [conn40] updating metadata for db30.coll30 from shard version 2|0||559eb62aca4787b9985d1cbb to shard version 2|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.871-0400 m31100| 2015-07-09T13:58:02.870-0400 I SHARDING [conn40] collection version was loaded at version 2|1||559eb62aca4787b9985d1cbb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.871-0400 m31100| 2015-07-09T13:58:02.870-0400 I SHARDING [conn40] splitChunk accepted at version 2|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.873-0400 m31100| 2015-07-09T13:58:02.872-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.872-0400-559eb62a792e00bb6727496e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436464682872), what: "split", ns: "db30.coll30", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb62aca4787b9985d1cbb') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb62aca4787b9985d1cbb') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.927-0400 m31100| 2015-07-09T13:58:02.927-0400 I SHARDING [conn40] distributed lock 'db30.coll30/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.930-0400 m30999| 2015-07-09T13:58:02.929-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db30.coll30: 0ms sequenceNumber: 138 version: 2|3||559eb62aca4787b9985d1cbb based on: 2|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.930-0400 m31200| 2015-07-09T13:58:02.930-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db30.coll30", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb62aca4787b9985d1cbb') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.934-0400 m31200| 2015-07-09T13:58:02.934-0400 I SHARDING [conn18] distributed lock 'db30.coll30/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb62ad5a107a5b9c0db26 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.935-0400 m31200| 2015-07-09T13:58:02.934-0400 I SHARDING [conn18] remotely refreshing metadata for db30.coll30 based on current shard version 0|0||559eb62aca4787b9985d1cbb, current metadata version is 1|1||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.935-0400 m31200| 2015-07-09T13:58:02.935-0400 I SHARDING [conn18] updating metadata for db30.coll30 from shard version 0|0||559eb62aca4787b9985d1cbb to shard version 2|0||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.935-0400 m31200| 2015-07-09T13:58:02.935-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb62aca4787b9985d1cbb, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.935-0400 m31200| 2015-07-09T13:58:02.935-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 
2015-07-09T13:58:02.938-0400 m31200| 2015-07-09T13:58:02.937-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:02.937-0400-559eb62ad5a107a5b9c0db27", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436464682937), what: "split", ns: "db30.coll30", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb62aca4787b9985d1cbb') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb62aca4787b9985d1cbb') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.992-0400 m31200| 2015-07-09T13:58:02.992-0400 I SHARDING [conn18] distributed lock 'db30.coll30/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:02.994-0400 m30999| 2015-07-09T13:58:02.994-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db30.coll30: 0ms sequenceNumber: 139 version: 2|5||559eb62aca4787b9985d1cbb based on: 2|3||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.219-0400 m31100| 2015-07-09T13:58:03.218-0400 I COMMAND [conn69] command db30.$cmd command: insert { insert: "coll30", documents: 501, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eb62aca4787b9985d1cbb') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 510, w: 510 } }, Database: { acquireCount: { w: 510 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 501 } }, oplog: { acquireCount: { w: 501 } } } protocol:op_command 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.281-0400 m31200| 2015-07-09T13:58:03.278-0400 I COMMAND [conn23] command db30.$cmd command: insert { insert: "coll30", documents: 499, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eb62aca4787b9985d1cbb') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 510, w: 510 } }, Database: { acquireCount: { w: 510 } }, Collection: { acquireCount: { w: 11 } }, Metadata: { acquireCount: { w: 499 } }, oplog: { acquireCount: { w: 499 } } } protocol:op_command 238ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.288-0400 m31100| 2015-07-09T13:58:03.287-0400 I INDEX [conn52] build index on: db30.coll30 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db30.coll30", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.288-0400 m31100| 2015-07-09T13:58:03.288-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.292-0400 m31200| 2015-07-09T13:58:03.291-0400 I INDEX [conn32] build index on: db30.coll30 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db30.coll30", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.292-0400 m31200| 2015-07-09T13:58:03.291-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.303-0400 m31100| 2015-07-09T13:58:03.302-0400 I INDEX [conn52] build index done. scanned 501 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.304-0400 m31200| 2015-07-09T13:58:03.302-0400 I INDEX [conn32] build index done. scanned 499 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.304-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.340-0400 m31202| 2015-07-09T13:58:03.339-0400 I INDEX [repl writer worker 3] build index on: db30.coll30 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db30.coll30", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.340-0400 m31202| 2015-07-09T13:58:03.339-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.344-0400 m31102| 2015-07-09T13:58:03.343-0400 I INDEX [repl writer worker 8] build index on: db30.coll30 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db30.coll30", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.344-0400 m31102| 2015-07-09T13:58:03.343-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.356-0400 m31201| 2015-07-09T13:58:03.355-0400 I INDEX [repl writer worker 15] build index on: db30.coll30 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db30.coll30", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.356-0400 m31201| 2015-07-09T13:58:03.355-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.376-0400 m31101| 2015-07-09T13:58:03.372-0400 I INDEX [repl writer worker 14] build index on: db30.coll30 properties: { v: 1, key: { createdAt: 1.0 }, name: "createdAt_1", ns: "db30.coll30", expireAfterSeconds: 3600.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.376-0400 m31101| 2015-07-09T13:58:03.372-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.391-0400 m31102| 2015-07-09T13:58:03.390-0400 I INDEX [repl writer worker 8] build index done. scanned 501 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.392-0400 m31202| 2015-07-09T13:58:03.392-0400 I INDEX [repl writer worker 3] build index done. scanned 499 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.418-0400 m31201| 2015-07-09T13:58:03.417-0400 I INDEX [repl writer worker 15] build index done. scanned 499 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.427-0400 m30999| 2015-07-09T13:58:03.420-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63275 #190 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.427-0400 m31101| 2015-07-09T13:58:03.426-0400 I INDEX [repl writer worker 14] build index done. scanned 501 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.443-0400 m30998| 2015-07-09T13:58:03.443-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63276 #189 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.451-0400 m30998| 2015-07-09T13:58:03.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63277 #190 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.458-0400 m30999| 2015-07-09T13:58:03.457-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63278 #191 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.458-0400 m30998| 2015-07-09T13:58:03.457-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63279 #191 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.460-0400 m30999| 2015-07-09T13:58:03.459-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63281 #192 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.466-0400 m30998| 2015-07-09T13:58:03.460-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63280 #192 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.466-0400 m30999| 2015-07-09T13:58:03.460-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63282 #193 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.466-0400 m30999| 2015-07-09T13:58:03.460-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63283 #194 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.467-0400 m30998| 2015-07-09T13:58:03.466-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63284 #193 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.474-0400 setting random seed: 2445528348907 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.474-0400 setting random seed: 778319067321 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.474-0400 setting random seed: 5726711344905 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.474-0400 setting random seed: 1208570566959 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.474-0400 setting random seed: 8837618636898 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.475-0400 m30998| 2015-07-09T13:58:03.475-0400 I SHARDING [conn190] ChunkManager: time to load chunks for db30.coll30: 0ms sequenceNumber: 35 version: 2|5||559eb62aca4787b9985d1cbb based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.475-0400 setting random seed: 2987581919878 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.478-0400 setting random seed: 9615977071225 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.479-0400 setting random seed: 6187480664812 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.479-0400 setting random seed: 1994979232549 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.479-0400 setting random seed: 3856499521061 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.691-0400 m30998| 2015-07-09T13:58:03.691-0400 I NETWORK [conn190] end connection 127.0.0.1:63277 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.693-0400 m30999| 2015-07-09T13:58:03.692-0400 I NETWORK [conn193] end connection 127.0.0.1:63282 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.706-0400 m30998| 2015-07-09T13:58:03.701-0400 I NETWORK [conn189] end 
connection 127.0.0.1:63276 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.706-0400 m30999| 2015-07-09T13:58:03.702-0400 I NETWORK [conn190] end connection 127.0.0.1:63275 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.707-0400 m30998| 2015-07-09T13:58:03.706-0400 I NETWORK [conn191] end connection 127.0.0.1:63279 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.707-0400 m30999| 2015-07-09T13:58:03.707-0400 I NETWORK [conn191] end connection 127.0.0.1:63278 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.714-0400 m30999| 2015-07-09T13:58:03.710-0400 I NETWORK [conn194] end connection 127.0.0.1:63283 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.723-0400 m30999| 2015-07-09T13:58:03.723-0400 I NETWORK [conn192] end connection 127.0.0.1:63281 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.745-0400 m30998| 2015-07-09T13:58:03.738-0400 I NETWORK [conn192] end connection 127.0.0.1:63280 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.745-0400 m30998| 2015-07-09T13:58:03.740-0400 I NETWORK [conn193] end connection 127.0.0.1:63284 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.762-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.762-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.763-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.763-0400 jstests/concurrency/fsm_workloads/collmod.js: Workload completed in 459 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.763-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.763-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.763-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.763-0400 m30999| 2015-07-09T13:58:03.762-0400 I COMMAND [conn1] DROP: db30.coll30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.763-0400 m30999| 2015-07-09T13:58:03.762-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:03.762-0400-559eb62bca4787b9985d1cbd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464683762), what: "dropCollection.start", ns: "db30.coll30", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.817-0400 m30999| 2015-07-09T13:58:03.816-0400 I SHARDING [conn1] distributed lock 'db30.coll30/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb62bca4787b9985d1cbe [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.817-0400 m31100| 2015-07-09T13:58:03.817-0400 I COMMAND [conn37] CMD: drop db30.coll30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.820-0400 m31200| 2015-07-09T13:58:03.820-0400 I COMMAND [conn84] CMD: drop db30.coll30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.827-0400 m31201| 2015-07-09T13:58:03.826-0400 I COMMAND [repl writer worker 12] CMD: drop db30.coll30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.837-0400 m31101| 2015-07-09T13:58:03.837-0400 I COMMAND [repl writer worker 10] CMD: drop db30.coll30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.843-0400 m31102| 2015-07-09T13:58:03.842-0400 I COMMAND [repl writer worker 7] CMD: drop db30.coll30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.846-0400 m31202| 2015-07-09T13:58:03.846-0400 I COMMAND [repl writer worker 7] CMD: drop db30.coll30 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.880-0400 m31100| 2015-07-09T13:58:03.879-0400 I SHARDING [conn37] remotely refreshing metadata for db30.coll30 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb62aca4787b9985d1cbb, current metadata version is 2|3||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.882-0400 m31100| 2015-07-09T13:58:03.881-0400 W SHARDING [conn37] no chunks found when reloading db30.coll30, previous version was 0|0||559eb62aca4787b9985d1cbb, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.882-0400 m31100| 2015-07-09T13:58:03.881-0400 I SHARDING [conn37] dropping metadata for db30.coll30 at shard version 2|3||559eb62aca4787b9985d1cbb, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.884-0400 m31200| 2015-07-09T13:58:03.883-0400 I SHARDING [conn84] remotely refreshing metadata for db30.coll30 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb62aca4787b9985d1cbb, current metadata version is 2|5||559eb62aca4787b9985d1cbb [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.886-0400 m31200| 2015-07-09T13:58:03.886-0400 W SHARDING [conn84] no chunks found when reloading db30.coll30, previous version was 0|0||559eb62aca4787b9985d1cbb, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.887-0400 m31200| 2015-07-09T13:58:03.886-0400 I SHARDING [conn84] dropping metadata for db30.coll30 at shard version 2|5||559eb62aca4787b9985d1cbb, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.888-0400 m30999| 2015-07-09T13:58:03.887-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:03.887-0400-559eb62bca4787b9985d1cbf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464683887), what: "dropCollection", ns: "db30.coll30", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.942-0400 m30999| 2015-07-09T13:58:03.942-0400 I SHARDING [conn1] distributed lock 'db30.coll30/bs-osx108-8:30999:1436464534:16807' unlocked. 
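
During the drop, each shard re-fetches its metadata from the config servers, finds no chunks for the namespace ("this is a drop"), and discards its cached shard version, while mongos writes dropCollection.start / dropCollection events to config.changelog under a distributed lock. Those changelog entries can be inspected after the fact; a sketch, assuming a shell connected to one of the mongos processes:

    // List the sharding changelog events recorded for the db30.coll30 drop.
    var changelog = db.getSiblingDB("config").changelog;
    changelog.find({ ns: "db30.coll30", what: /dropCollection/ })
             .sort({ time: 1 })
             .forEach(printjson);
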
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.999-0400 m30999| 2015-07-09T13:58:03.998-0400 I COMMAND [conn1] DROP DATABASE: db30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.999-0400 m30999| 2015-07-09T13:58:03.999-0400 I SHARDING [conn1] DBConfig::dropDatabase: db30 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:03.999-0400 m30999| 2015-07-09T13:58:03.999-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:03.999-0400-559eb62bca4787b9985d1cc0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464683999), what: "dropDatabase.start", ns: "db30", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.107-0400 m30999| 2015-07-09T13:58:04.106-0400 I SHARDING [conn1] DBConfig::dropDatabase: db30 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.107-0400 m31100| 2015-07-09T13:58:04.107-0400 I COMMAND [conn28] dropDatabase db30 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.108-0400 m31100| 2015-07-09T13:58:04.107-0400 I COMMAND [conn28] dropDatabase db30 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.109-0400 m30999| 2015-07-09T13:58:04.108-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.108-0400-559eb62cca4787b9985d1cc1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464684108), what: "dropDatabase", ns: "db30", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.109-0400 m31102| 2015-07-09T13:58:04.109-0400 I COMMAND [repl writer worker 9] dropDatabase db30 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.110-0400 m31102| 2015-07-09T13:58:04.109-0400 I COMMAND [repl writer worker 9] dropDatabase db30 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.110-0400 m31101| 2015-07-09T13:58:04.109-0400 I COMMAND [repl writer worker 15] dropDatabase db30 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.110-0400 m31101| 2015-07-09T13:58:04.109-0400 I COMMAND [repl writer worker 15] dropDatabase db30 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.197-0400 m31100| 2015-07-09T13:58:04.197-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.201-0400 m31101| 2015-07-09T13:58:04.201-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.201-0400 m31102| 2015-07-09T13:58:04.201-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.232-0400 m31200| 2015-07-09T13:58:04.232-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.235-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.235-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.235-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.236-0400 jstests/concurrency/fsm_workloads/yield_id_hack.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.236-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.236-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.236-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.236-0400 m31201| 2015-07-09T13:58:04.236-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T13:58:04.243-0400 m31202| 2015-07-09T13:58:04.242-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.243-0400 m30999| 2015-07-09T13:58:04.243-0400 I SHARDING [conn1] distributed lock 'db31/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb62cca4787b9985d1cc2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.247-0400 m30999| 2015-07-09T13:58:04.247-0400 I SHARDING [conn1] Placing [db31] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.247-0400 m30999| 2015-07-09T13:58:04.247-0400 I SHARDING [conn1] Enabling sharding for database [db31] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.301-0400 m30999| 2015-07-09T13:58:04.300-0400 I SHARDING [conn1] distributed lock 'db31/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.322-0400 m31100| 2015-07-09T13:58:04.320-0400 I INDEX [conn69] build index on: db31.coll31 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db31.coll31" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.322-0400 m31100| 2015-07-09T13:58:04.320-0400 I INDEX [conn69] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.330-0400 m31100| 2015-07-09T13:58:04.330-0400 I INDEX [conn69] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.332-0400 m30999| 2015-07-09T13:58:04.331-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db31.coll31", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.336-0400 m30999| 2015-07-09T13:58:04.336-0400 I SHARDING [conn1] distributed lock 'db31.coll31/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb62cca4787b9985d1cc3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.337-0400 m30999| 2015-07-09T13:58:04.336-0400 I SHARDING [conn1] enable sharding on: db31.coll31 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.338-0400 m30999| 2015-07-09T13:58:04.337-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.337-0400-559eb62cca4787b9985d1cc4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464684337), what: "shardCollection.start", ns: "db31.coll31", details: { shardKey: { _id: "hashed" }, collection: "db31.coll31", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.342-0400 m31102| 2015-07-09T13:58:04.341-0400 I INDEX [repl writer worker 2] build index on: db31.coll31 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db31.coll31" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.342-0400 m31102| 2015-07-09T13:58:04.341-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.345-0400 m31101| 2015-07-09T13:58:04.345-0400 I INDEX [repl writer worker 8] build index on: db31.coll31 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db31.coll31" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.346-0400 m31101| 2015-07-09T13:58:04.345-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.354-0400 m31101| 2015-07-09T13:58:04.354-0400 I INDEX [repl writer worker 
8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.355-0400 m31102| 2015-07-09T13:58:04.354-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.390-0400 m30999| 2015-07-09T13:58:04.390-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db31.coll31 using new epoch 559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.481-0400 m30999| 2015-07-09T13:58:04.481-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:04.475-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.482-0400 m30999| 2015-07-09T13:58:04.481-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db31.coll31: 0ms sequenceNumber: 140 version: 1|1||559eb62cca4787b9985d1cc5 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.537-0400 m30999| 2015-07-09T13:58:04.537-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db31.coll31: 1ms sequenceNumber: 141 version: 1|1||559eb62cca4787b9985d1cc5 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.539-0400 m31100| 2015-07-09T13:58:04.539-0400 I SHARDING [conn52] remotely refreshing metadata for db31.coll31 with requested shard version 1|1||559eb62cca4787b9985d1cc5, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.541-0400 m31100| 2015-07-09T13:58:04.540-0400 I SHARDING [conn52] collection db31.coll31 was previously unsharded, new metadata loaded with shard version 1|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.541-0400 m31100| 2015-07-09T13:58:04.540-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb62cca4787b9985d1cc5, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.541-0400 m30999| 2015-07-09T13:58:04.541-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.541-0400-559eb62cca4787b9985d1cc6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464684541), what: "shardCollection", ns: "db31.coll31", details: { version: "1|1||559eb62cca4787b9985d1cc5" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.595-0400 m30999| 2015-07-09T13:58:04.595-0400 I SHARDING [conn1] distributed lock 'db31.coll31/bs-osx108-8:30999:1436464534:16807' unlocked. 
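
Setup for yield_id_hack.js follows the harness's standard per-workload pattern: place db31 on test-rs0, build a hashed _id index on the primary shard (replicated to m31101/m31102), then shard db31.coll31 and create two initial chunks under a fresh epoch (559eb62cca4787b9985d1cc5). A sketch of the equivalent shell commands, assuming a mongos connection; all names are taken from the log:

    // Shard a collection on a hashed _id key, as the workload setup does.
    sh.enableSharding("db31");
    sh.shardCollection("db31.coll31", { _id: "hashed" });
    // For an empty collection, shardCollection can create the { _id: "hashed" }
    // index itself; the harness here builds it explicitly beforehand.
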
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.597-0400 m30999| 2015-07-09T13:58:04.596-0400 I SHARDING [conn1] moving chunk ns: db31.coll31 moving ( ns: db31.coll31, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.597-0400 m31100| 2015-07-09T13:58:04.597-0400 I SHARDING [conn37] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.598-0400 m31100| 2015-07-09T13:58:04.598-0400 I SHARDING [conn37] received moveChunk request: { moveChunk: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb62cca4787b9985d1cc5') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.603-0400 m31100| 2015-07-09T13:58:04.602-0400 I SHARDING [conn37] distributed lock 'db31.coll31/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb62c792e00bb67274970 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.603-0400 m31100| 2015-07-09T13:58:04.603-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.603-0400-559eb62c792e00bb67274971", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464684603), what: "moveChunk.start", ns: "db31.coll31", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.656-0400 m31100| 2015-07-09T13:58:04.656-0400 I SHARDING [conn37] remotely refreshing metadata for db31.coll31 based on current shard version 1|1||559eb62cca4787b9985d1cc5, current metadata version is 1|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.658-0400 m31100| 2015-07-09T13:58:04.657-0400 I SHARDING [conn37] metadata of collection db31.coll31 already up to date (shard version : 1|1||559eb62cca4787b9985d1cc5, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.658-0400 m31100| 2015-07-09T13:58:04.658-0400 I SHARDING [conn37] moveChunk request accepted at version 1|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.659-0400 m31100| 2015-07-09T13:58:04.659-0400 I SHARDING [conn37] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.660-0400 m31200| 2015-07-09T13:58:04.659-0400 I SHARDING [conn16] remotely refreshing metadata for db31.coll31, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.661-0400 m31200| 2015-07-09T13:58:04.661-0400 I SHARDING [conn16] collection db31.coll31 was previously unsharded, new metadata loaded with shard version 0|0||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.661-0400 m31200| 2015-07-09T13:58:04.661-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb62cca4787b9985d1cc5, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.662-0400 m31200| 2015-07-09T13:58:04.661-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db31.coll31 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.664-0400 m31100| 2015-07-09T13:58:04.663-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.667-0400 m31100| 2015-07-09T13:58:04.667-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.673-0400 m31100| 2015-07-09T13:58:04.672-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.682-0400 m31100| 2015-07-09T13:58:04.682-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.687-0400 m31200| 2015-07-09T13:58:04.686-0400 I INDEX [migrateThread] build index on: db31.coll31 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db31.coll31" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.687-0400 m31200| 2015-07-09T13:58:04.687-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.693-0400 m31200| 2015-07-09T13:58:04.693-0400 I INDEX [migrateThread] build index on: db31.coll31 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db31.coll31" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.694-0400 m31200| 2015-07-09T13:58:04.693-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.702-0400 m31100| 2015-07-09T13:58:04.700-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.708-0400 m31200| 2015-07-09T13:58:04.707-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.709-0400 m31200| 2015-07-09T13:58:04.709-0400 I SHARDING [migrateThread] Deleter starting delete for: db31.coll31 from { _id: 0 } -> { _id: MaxKey }, with opId: 54880 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.712-0400 m31200| 2015-07-09T13:58:04.711-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db31.coll31 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.716-0400 m31201| 2015-07-09T13:58:04.715-0400 I INDEX [repl writer worker 2] build index on: db31.coll31 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db31.coll31" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.716-0400 m31201| 2015-07-09T13:58:04.715-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.724-0400 m31201| 2015-07-09T13:58:04.724-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.727-0400 m31200| 2015-07-09T13:58:04.726-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.727-0400 m31200| 2015-07-09T13:58:04.726-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db31.coll31' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.727-0400 m31202| 2015-07-09T13:58:04.727-0400 I INDEX [repl writer worker 14] build index on: db31.coll31 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db31.coll31" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.728-0400 m31202| 2015-07-09T13:58:04.727-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.732-0400 m31202| 2015-07-09T13:58:04.732-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.733-0400 m31100| 2015-07-09T13:58:04.733-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.734-0400 m31100| 2015-07-09T13:58:04.733-0400 I SHARDING [conn37] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.734-0400 m31100| 2015-07-09T13:58:04.734-0400 I SHARDING [conn37] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.734-0400 m31100| 2015-07-09T13:58:04.734-0400 I SHARDING [conn37] moveChunk setting version to: 2|0||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.738-0400 m31200| 2015-07-09T13:58:04.738-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db31.coll31' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.739-0400 m31200| 2015-07-09T13:58:04.738-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.738-0400-559eb62cd5a107a5b9c0db28", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464684738), what: "moveChunk.to", ns: "db31.coll31", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 47, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 11, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.793-0400 m31100| 2015-07-09T13:58:04.792-0400 I SHARDING [conn37] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.793-0400 m31100| 2015-07-09T13:58:04.792-0400 I SHARDING [conn37] moveChunk updating self version to: 2|1||559eb62cca4787b9985d1cc5 through { _id: MinKey } -> { _id: 0 } for collection 'db31.coll31' [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.794-0400 m31100| 2015-07-09T13:58:04.793-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.793-0400-559eb62c792e00bb67274972", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464684793), what: "moveChunk.commit", ns: "db31.coll31", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.848-0400 m31100| 2015-07-09T13:58:04.847-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.848-0400 m31100| 2015-07-09T13:58:04.848-0400 I SHARDING [conn37] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.848-0400 m31100| 2015-07-09T13:58:04.848-0400 I SHARDING [conn37] Deleter starting delete for: db31.coll31 from { _id: 0 } -> { _id: MaxKey }, with opId: 48350 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:58:04.849-0400 m31100| 2015-07-09T13:58:04.848-0400 I SHARDING [conn37] rangeDeleter deleted 0 documents for db31.coll31 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.849-0400 m31100| 2015-07-09T13:58:04.848-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.850-0400 m31100| 2015-07-09T13:58:04.849-0400 I SHARDING [conn37] distributed lock 'db31.coll31/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.850-0400 m31100| 2015-07-09T13:58:04.849-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.849-0400-559eb62c792e00bb67274973", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464684849), what: "moveChunk.from", ns: "db31.coll31", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.903-0400 m31100| 2015-07-09T13:58:04.902-0400 I COMMAND [conn37] command db31.coll31 command: moveChunk { moveChunk: "db31.coll31", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb62cca4787b9985d1cc5') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 305ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.905-0400 m30999| 2015-07-09T13:58:04.905-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db31.coll31: 1ms sequenceNumber: 142 version: 2|1||559eb62cca4787b9985d1cc5 based on: 1|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.907-0400 m31100| 2015-07-09T13:58:04.906-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db31.coll31", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb62cca4787b9985d1cc5') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.912-0400 m31100| 2015-07-09T13:58:04.912-0400 I SHARDING [conn37] distributed lock 'db31.coll31/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb62c792e00bb67274974 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.912-0400 m31100| 2015-07-09T13:58:04.912-0400 I SHARDING [conn37] remotely refreshing metadata for db31.coll31 based on current shard version 2|0||559eb62cca4787b9985d1cc5, current metadata version is 2|0||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.914-0400 m31100| 2015-07-09T13:58:04.914-0400 I SHARDING [conn37] updating metadata for db31.coll31 from shard version 2|0||559eb62cca4787b9985d1cc5 to shard version 2|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.914-0400 m31100| 2015-07-09T13:58:04.914-0400 I 
SHARDING [conn37] collection version was loaded at version 2|1||559eb62cca4787b9985d1cc5, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.915-0400 m31100| 2015-07-09T13:58:04.914-0400 I SHARDING [conn37] splitChunk accepted at version 2|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.916-0400 m31100| 2015-07-09T13:58:04.915-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.915-0400-559eb62c792e00bb67274975", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464684915), what: "split", ns: "db31.coll31", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb62cca4787b9985d1cc5') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb62cca4787b9985d1cc5') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.970-0400 m31100| 2015-07-09T13:58:04.970-0400 I SHARDING [conn37] distributed lock 'db31.coll31/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.972-0400 m30999| 2015-07-09T13:58:04.972-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db31.coll31: 0ms sequenceNumber: 143 version: 2|3||559eb62cca4787b9985d1cc5 based on: 2|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.973-0400 m31200| 2015-07-09T13:58:04.972-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db31.coll31", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb62cca4787b9985d1cc5') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.977-0400 m31200| 2015-07-09T13:58:04.976-0400 I SHARDING [conn84] distributed lock 'db31.coll31/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb62cd5a107a5b9c0db29 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.977-0400 m31200| 2015-07-09T13:58:04.976-0400 I SHARDING [conn84] remotely refreshing metadata for db31.coll31 based on current shard version 0|0||559eb62cca4787b9985d1cc5, current metadata version is 1|1||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.978-0400 m31200| 2015-07-09T13:58:04.978-0400 I SHARDING [conn84] updating metadata for db31.coll31 from shard version 0|0||559eb62cca4787b9985d1cc5 to shard version 2|0||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.979-0400 m31200| 2015-07-09T13:58:04.978-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb62cca4787b9985d1cc5, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.979-0400 m31200| 2015-07-09T13:58:04.978-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:04.980-0400 m31200| 2015-07-09T13:58:04.980-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:04.980-0400-559eb62cd5a107a5b9c0db2a", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436464684980), what: "split", ns: "db31.coll31", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb62cca4787b9985d1cc5') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb62cca4787b9985d1cc5') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.035-0400 m31200| 2015-07-09T13:58:05.034-0400 I SHARDING [conn84] distributed lock 'db31.coll31/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.037-0400 m30999| 2015-07-09T13:58:05.036-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db31.coll31: 0ms sequenceNumber: 144 version: 2|5||559eb62cca4787b9985d1cc5 based on: 2|3||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.124-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.170-0400 m30998| 2015-07-09T13:58:05.169-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63285 #194 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.180-0400 m30998| 2015-07-09T13:58:05.180-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63286 #195 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.182-0400 m30998| 2015-07-09T13:58:05.182-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63287 #196 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.182-0400 m30999| 2015-07-09T13:58:05.182-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63288 #195 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.190-0400 m30999| 2015-07-09T13:58:05.190-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63289 #196 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.198-0400 setting random seed: 7118619745597 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.198-0400 setting random seed: 2642119964584 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.198-0400 setting random seed: 6110265450552 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.198-0400 setting random seed: 2289350782521 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.198-0400 setting random seed: 5231475248001 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.201-0400 m30998| 2015-07-09T13:58:05.201-0400 I SHARDING [conn194] ChunkManager: time to load chunks for db31.coll31: 0ms sequenceNumber: 36 version: 2|5||559eb62cca4787b9985d1cc5 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:05.896-0400 m30998| 2015-07-09T13:58:05.896-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:05.895-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:06.801-0400 m31100| 2015-07-09T13:58:06.800-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:06.799-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:07.220-0400 m31200| 2015-07-09T13:58:07.220-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:07.219-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms 
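
The initial chunk layout for db31.coll31 is now in place: the [0, MaxKey) chunk was migrated from test-rs0 to test-rs1 (a moveChunk with waitForDelete: true, hence the inline rangeDeleter pass), and each shard then split its chunk at ±4611686018427387902, just under ±2^62, the midpoints of the two halves of the signed 64-bit hashed-key space, ending at collection version 2|5. A sketch of equivalent manual commands, assuming a mongos shell; the split points are copied from the log and lie in hashed-key space:

    // Reproduce the post-shardCollection chunk layout by hand.
    // moveChunk uses explicit bounds, as "find" would re-hash the value.
    db.adminCommand({
        moveChunk: "db31.coll31",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
        to: "test-rs1"
    });
    sh.splitAt("db31.coll31", { _id: NumberLong("-4611686018427387902") });
    sh.splitAt("db31.coll31", { _id: NumberLong("4611686018427387902") });
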
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:07.379-0400 m30998| 2015-07-09T13:58:07.379-0400 I NETWORK [conn194] end connection 127.0.0.1:63285 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:07.446-0400 m30999| 2015-07-09T13:58:07.445-0400 I NETWORK [conn196] end connection 127.0.0.1:63289 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:07.770-0400 m30998| 2015-07-09T13:58:07.770-0400 I NETWORK [conn195] end connection 127.0.0.1:63286 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:08.251-0400 m30998| 2015-07-09T13:58:08.250-0400 I NETWORK [conn196] end connection 127.0.0.1:63287 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:08.993-0400 m30999| 2015-07-09T13:58:08.992-0400 I NETWORK [conn195] end connection 127.0.0.1:63288 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.025-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.026-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.026-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.026-0400 jstests/concurrency/fsm_workloads/yield_id_hack.js: Workload completed in 3891 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.026-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.026-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.026-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.026-0400 m30999| 2015-07-09T13:58:09.026-0400 I COMMAND [conn1] DROP: db31.coll31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.027-0400 m30999| 2015-07-09T13:58:09.026-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:09.026-0400-559eb631ca4787b9985d1cc7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464689026), what: "dropCollection.start", ns: "db31.coll31", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.083-0400 m30999| 2015-07-09T13:58:09.082-0400 I SHARDING [conn1] distributed lock 'db31.coll31/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb631ca4787b9985d1cc8 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.083-0400 m31100| 2015-07-09T13:58:09.083-0400 I COMMAND [conn37] CMD: drop db31.coll31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.085-0400 m31200| 2015-07-09T13:58:09.085-0400 I COMMAND [conn84] CMD: drop db31.coll31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.087-0400 m31102| 2015-07-09T13:58:09.087-0400 I COMMAND [repl writer worker 2] CMD: drop db31.coll31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.090-0400 m31101| 2015-07-09T13:58:09.089-0400 I COMMAND [repl writer worker 6] CMD: drop db31.coll31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.092-0400 m31201| 2015-07-09T13:58:09.092-0400 I COMMAND [repl writer worker 14] CMD: drop db31.coll31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.093-0400 m31202| 2015-07-09T13:58:09.093-0400 I COMMAND [repl writer worker 14] CMD: drop db31.coll31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.144-0400 m31100| 2015-07-09T13:58:09.143-0400 I SHARDING [conn37] remotely refreshing metadata for db31.coll31 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb62cca4787b9985d1cc5, current metadata version is 2|3||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.145-0400 m31100| 
2015-07-09T13:58:09.145-0400 W SHARDING [conn37] no chunks found when reloading db31.coll31, previous version was 0|0||559eb62cca4787b9985d1cc5, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.146-0400 m31100| 2015-07-09T13:58:09.145-0400 I SHARDING [conn37] dropping metadata for db31.coll31 at shard version 2|3||559eb62cca4787b9985d1cc5, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.147-0400 m31200| 2015-07-09T13:58:09.147-0400 I SHARDING [conn84] remotely refreshing metadata for db31.coll31 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb62cca4787b9985d1cc5, current metadata version is 2|5||559eb62cca4787b9985d1cc5 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.148-0400 m31200| 2015-07-09T13:58:09.148-0400 W SHARDING [conn84] no chunks found when reloading db31.coll31, previous version was 0|0||559eb62cca4787b9985d1cc5, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.149-0400 m31200| 2015-07-09T13:58:09.148-0400 I SHARDING [conn84] dropping metadata for db31.coll31 at shard version 2|5||559eb62cca4787b9985d1cc5, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.150-0400 m30999| 2015-07-09T13:58:09.149-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:09.149-0400-559eb631ca4787b9985d1cc9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464689149), what: "dropCollection", ns: "db31.coll31", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.204-0400 m30999| 2015-07-09T13:58:09.204-0400 I SHARDING [conn1] distributed lock 'db31.coll31/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.260-0400 m30999| 2015-07-09T13:58:09.260-0400 I COMMAND [conn1] DROP DATABASE: db31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.261-0400 m30999| 2015-07-09T13:58:09.260-0400 I SHARDING [conn1] DBConfig::dropDatabase: db31 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.261-0400 m30999| 2015-07-09T13:58:09.260-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:09.260-0400-559eb631ca4787b9985d1cca", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464689260), what: "dropDatabase.start", ns: "db31", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.368-0400 m30999| 2015-07-09T13:58:09.368-0400 I SHARDING [conn1] DBConfig::dropDatabase: db31 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.369-0400 m31100| 2015-07-09T13:58:09.368-0400 I COMMAND [conn28] dropDatabase db31 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.369-0400 m31100| 2015-07-09T13:58:09.368-0400 I COMMAND [conn28] dropDatabase db31 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.370-0400 m30999| 2015-07-09T13:58:09.369-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:09.369-0400-559eb631ca4787b9985d1ccb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464689369), what: "dropDatabase", ns: "db31", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.370-0400 m31102| 2015-07-09T13:58:09.369-0400 I COMMAND [repl writer worker 8] dropDatabase db31 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.370-0400 m31102| 2015-07-09T13:58:09.369-0400 I COMMAND [repl writer worker 8] 
dropDatabase db31 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.371-0400 m31101| 2015-07-09T13:58:09.370-0400 I COMMAND [repl writer worker 15] dropDatabase db31 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.371-0400 m31101| 2015-07-09T13:58:09.370-0400 I COMMAND [repl writer worker 15] dropDatabase db31 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.459-0400 m31100| 2015-07-09T13:58:09.458-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.462-0400 m31102| 2015-07-09T13:58:09.462-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.463-0400 m31101| 2015-07-09T13:58:09.462-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.497-0400 m31200| 2015-07-09T13:58:09.497-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.500-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.500-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.500-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.501-0400 jstests/concurrency/fsm_workloads/indexed_insert_multikey.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.501-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.501-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.501-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.501-0400 m31201| 2015-07-09T13:58:09.501-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.503-0400 m31202| 2015-07-09T13:58:09.502-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.510-0400 m30999| 2015-07-09T13:58:09.509-0400 I SHARDING [conn1] distributed lock 'db32/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb631ca4787b9985d1ccc [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.514-0400 m30999| 2015-07-09T13:58:09.514-0400 I SHARDING [conn1] Placing [db32] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.514-0400 m30999| 2015-07-09T13:58:09.514-0400 I SHARDING [conn1] Enabling sharding for database [db32] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.567-0400 m30999| 2015-07-09T13:58:09.567-0400 I SHARDING [conn1] distributed lock 'db32/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.594-0400 m31100| 2015-07-09T13:58:09.593-0400 I INDEX [conn69] build index on: db32.coll32 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.594-0400 m31100| 2015-07-09T13:58:09.594-0400 I INDEX [conn69] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.603-0400 m31100| 2015-07-09T13:58:09.603-0400 I INDEX [conn69] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.604-0400 m30999| 2015-07-09T13:58:09.604-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db32.coll32", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.607-0400 m30999| 2015-07-09T13:58:09.606-0400 I SHARDING [conn1] distributed lock 'db32.coll32/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb631ca4787b9985d1ccd [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.608-0400 m30999| 2015-07-09T13:58:09.608-0400 I SHARDING [conn1] enable sharding on: db32.coll32 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.609-0400 m30999| 2015-07-09T13:58:09.608-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:09.608-0400-559eb631ca4787b9985d1cce", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464689608), what: "shardCollection.start", ns: "db32.coll32", details: { shardKey: { _id: "hashed" }, collection: "db32.coll32", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.609-0400 m31101| 2015-07-09T13:58:09.608-0400 I INDEX [repl writer worker 14] build index on: db32.coll32 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.609-0400 m31101| 2015-07-09T13:58:09.608-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.610-0400 m31102| 2015-07-09T13:58:09.609-0400 I INDEX [repl writer worker 11] build index on: db32.coll32 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.610-0400 m31102| 2015-07-09T13:58:09.609-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.611-0400 m31101| 2015-07-09T13:58:09.610-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.615-0400 m31102| 2015-07-09T13:58:09.614-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.662-0400 m30999| 2015-07-09T13:58:09.662-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db32.coll32 using new epoch 559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.770-0400 m30999| 2015-07-09T13:58:09.769-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db32.coll32: 0ms sequenceNumber: 145 version: 1|1||559eb631ca4787b9985d1ccf based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.828-0400 m30999| 2015-07-09T13:58:09.827-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db32.coll32: 1ms sequenceNumber: 146 version: 1|1||559eb631ca4787b9985d1ccf based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.830-0400 m31100| 2015-07-09T13:58:09.829-0400 I SHARDING [conn52] remotely refreshing metadata for db32.coll32 with requested shard version 1|1||559eb631ca4787b9985d1ccf, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.831-0400 m31100| 2015-07-09T13:58:09.831-0400 I SHARDING [conn52] collection db32.coll32 was previously unsharded, new metadata loaded with shard version 1|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.831-0400 m31100| 2015-07-09T13:58:09.831-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb631ca4787b9985d1ccf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.832-0400 m30999| 2015-07-09T13:58:09.831-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:09.831-0400-559eb631ca4787b9985d1cd0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464689831), what: "shardCollection", ns: "db32.coll32", details: { version: "1|1||559eb631ca4787b9985d1ccf" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.886-0400 m30999| 2015-07-09T13:58:09.885-0400 I SHARDING [conn1] distributed lock 'db32.coll32/bs-osx108-8:30999:1436464534:16807' unlocked. 
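
The same cycle now repeats for indexed_insert_multikey.js on db32.coll32: hashed _id index, shardCollection under a fresh epoch, two initial chunks, and, in the entries that follow, the same moveChunk-and-split sequence. The resulting layout can be confirmed from the config database; a sketch, assuming a shell connected to a mongos:

    // Check the chunk ranges and owning shards for db32.coll32.
    db.getSiblingDB("config").chunks
      .find({ ns: "db32.coll32" })
      .sort({ min: 1 })
      .forEach(function(c) { printjson({ min: c.min, max: c.max, shard: c.shard }); });
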
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.887-0400 m30999| 2015-07-09T13:58:09.886-0400 I SHARDING [conn1] moving chunk ns: db32.coll32 moving ( ns: db32.coll32, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.887-0400 m31100| 2015-07-09T13:58:09.887-0400 I SHARDING [conn37] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.888-0400 m31100| 2015-07-09T13:58:09.887-0400 I SHARDING [conn37] received moveChunk request: { moveChunk: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb631ca4787b9985d1ccf') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.891-0400 m31100| 2015-07-09T13:58:09.891-0400 I SHARDING [conn37] distributed lock 'db32.coll32/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb631792e00bb67274977 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.892-0400 m31100| 2015-07-09T13:58:09.891-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:09.891-0400-559eb631792e00bb67274978", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464689891), what: "moveChunk.start", ns: "db32.coll32", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.945-0400 m31100| 2015-07-09T13:58:09.944-0400 I SHARDING [conn37] remotely refreshing metadata for db32.coll32 based on current shard version 1|1||559eb631ca4787b9985d1ccf, current metadata version is 1|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.946-0400 m31100| 2015-07-09T13:58:09.945-0400 I SHARDING [conn37] metadata of collection db32.coll32 already up to date (shard version : 1|1||559eb631ca4787b9985d1ccf, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.946-0400 m31100| 2015-07-09T13:58:09.946-0400 I SHARDING [conn37] moveChunk request accepted at version 1|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.946-0400 m31100| 2015-07-09T13:58:09.946-0400 I SHARDING [conn37] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.947-0400 m31200| 2015-07-09T13:58:09.946-0400 I SHARDING [conn16] remotely refreshing metadata for db32.coll32, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.948-0400 m31200| 2015-07-09T13:58:09.948-0400 I SHARDING [conn16] collection db32.coll32 was previously unsharded, new metadata loaded with shard version 0|0||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.949-0400 m31200| 2015-07-09T13:58:09.948-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb631ca4787b9985d1ccf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.949-0400 m31200| 2015-07-09T13:58:09.949-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db32.coll32 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.951-0400 m31100| 2015-07-09T13:58:09.951-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.955-0400 m31100| 2015-07-09T13:58:09.954-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.960-0400 m31100| 2015-07-09T13:58:09.959-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.966-0400 m31200| 2015-07-09T13:58:09.966-0400 I INDEX [migrateThread] build index on: db32.coll32 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.966-0400 m31200| 2015-07-09T13:58:09.966-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.969-0400 m31100| 2015-07-09T13:58:09.968-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.980-0400 m31200| 2015-07-09T13:58:09.980-0400 I INDEX [migrateThread] build index on: db32.coll32 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.981-0400 m31200| 2015-07-09T13:58:09.980-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.987-0400 m31100| 2015-07-09T13:58:09.986-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.990-0400 m31200| 2015-07-09T13:58:09.990-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.991-0400 m31200| 2015-07-09T13:58:09.991-0400 I SHARDING [migrateThread] Deleter starting delete for: db32.coll32 from { _id: 0 } -> { _id: MaxKey }, with opId: 64498 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:09.995-0400 m31200| 2015-07-09T13:58:09.994-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db32.coll32 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.000-0400 m31202| 2015-07-09T13:58:09.999-0400 I INDEX [repl writer worker 12] build index on: db32.coll32 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.000-0400 m31202| 2015-07-09T13:58:09.999-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.005-0400 m31201| 2015-07-09T13:58:10.005-0400 I INDEX [repl writer worker 4] build index on: db32.coll32 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.005-0400 m31201| 2015-07-09T13:58:10.005-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.008-0400 m31202| 2015-07-09T13:58:10.008-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.010-0400 m31200| 2015-07-09T13:58:10.009-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.011-0400 m31200| 2015-07-09T13:58:10.009-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db32.coll32' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.011-0400 m31201| 2015-07-09T13:58:10.011-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.020-0400 m31100| 2015-07-09T13:58:10.019-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.020-0400 m31100| 2015-07-09T13:58:10.019-0400 I SHARDING [conn37] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.020-0400 m31100| 2015-07-09T13:58:10.020-0400 I SHARDING [conn37] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.020-0400 m31100| 2015-07-09T13:58:10.020-0400 I SHARDING [conn37] moveChunk setting version to: 2|0||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.022-0400 m31200| 2015-07-09T13:58:10.021-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db32.coll32' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.022-0400 m31200| 2015-07-09T13:58:10.021-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:10.021-0400-559eb632d5a107a5b9c0db2b", server: "bs-osx108-8", clientAddr: "", time: new Date(1436464690021), what: "moveChunk.to", ns: "db32.coll32", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 41, step 2 of 5: 17, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.076-0400 m31100| 2015-07-09T13:58:10.075-0400 I SHARDING [conn37] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.076-0400 m31100| 2015-07-09T13:58:10.075-0400 I SHARDING [conn37] moveChunk updating self version to: 2|1||559eb631ca4787b9985d1ccf through { _id: MinKey } -> { _id: 0 } for collection 'db32.coll32' [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.077-0400 m31100| 2015-07-09T13:58:10.076-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:10.076-0400-559eb632792e00bb67274979", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464690076), what: "moveChunk.commit", ns: "db32.coll32", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.130-0400 m31100| 2015-07-09T13:58:10.130-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.131-0400 m31100| 2015-07-09T13:58:10.130-0400 I SHARDING [conn37] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.131-0400 m31100| 2015-07-09T13:58:10.130-0400 I SHARDING [conn37] Deleter starting delete for: db32.coll32 from { _id: 0 } -> { _id: MaxKey }, with opId: 55787 [js_test:fsm_all_sharded_replication] 
2015-07-09T13:58:10.131-0400 m31100| 2015-07-09T13:58:10.130-0400 I SHARDING [conn37] rangeDeleter deleted 0 documents for db32.coll32 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.131-0400 m31100| 2015-07-09T13:58:10.130-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.132-0400 m31100| 2015-07-09T13:58:10.131-0400 I SHARDING [conn37] distributed lock 'db32.coll32/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.133-0400 m31100| 2015-07-09T13:58:10.132-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:10.132-0400-559eb632792e00bb6727497a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464690132), what: "moveChunk.from", ns: "db32.coll32", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 3, step 4 of 6: 70, step 5 of 6: 110, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.187-0400 m31100| 2015-07-09T13:58:10.186-0400 I COMMAND [conn37] command db32.coll32 command: moveChunk { moveChunk: "db32.coll32", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb631ca4787b9985d1ccf') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 299ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.188-0400 m30999| 2015-07-09T13:58:10.188-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db32.coll32: 0ms sequenceNumber: 147 version: 2|1||559eb631ca4787b9985d1ccf based on: 1|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.189-0400 m31100| 2015-07-09T13:58:10.189-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db32.coll32", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb631ca4787b9985d1ccf') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.192-0400 m31100| 2015-07-09T13:58:10.192-0400 I SHARDING [conn37] distributed lock 'db32.coll32/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb632792e00bb6727497b [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.193-0400 m31100| 2015-07-09T13:58:10.192-0400 I SHARDING [conn37] remotely refreshing metadata for db32.coll32 based on current shard version 2|0||559eb631ca4787b9985d1ccf, current metadata version is 2|0||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.194-0400 m31100| 2015-07-09T13:58:10.193-0400 I SHARDING [conn37] updating metadata for db32.coll32 from shard version 2|0||559eb631ca4787b9985d1ccf to shard version 2|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.194-0400 m31100| 2015-07-09T13:58:10.194-0400 I 
SHARDING [conn37] collection version was loaded at version 2|1||559eb631ca4787b9985d1ccf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.194-0400 m31100| 2015-07-09T13:58:10.194-0400 I SHARDING [conn37] splitChunk accepted at version 2|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.196-0400 m31100| 2015-07-09T13:58:10.195-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:10.195-0400-559eb632792e00bb6727497c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464690195), what: "split", ns: "db32.coll32", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb631ca4787b9985d1ccf') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb631ca4787b9985d1ccf') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.251-0400 m31100| 2015-07-09T13:58:10.251-0400 I SHARDING [conn37] distributed lock 'db32.coll32/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.253-0400 m30999| 2015-07-09T13:58:10.253-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db32.coll32: 0ms sequenceNumber: 148 version: 2|3||559eb631ca4787b9985d1ccf based on: 2|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.254-0400 m31200| 2015-07-09T13:58:10.253-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db32.coll32", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb631ca4787b9985d1ccf') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.257-0400 m31200| 2015-07-09T13:58:10.256-0400 I SHARDING [conn84] distributed lock 'db32.coll32/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb632d5a107a5b9c0db2c [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.257-0400 m31200| 2015-07-09T13:58:10.256-0400 I SHARDING [conn84] remotely refreshing metadata for db32.coll32 based on current shard version 0|0||559eb631ca4787b9985d1ccf, current metadata version is 1|1||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.258-0400 m31200| 2015-07-09T13:58:10.258-0400 I SHARDING [conn84] updating metadata for db32.coll32 from shard version 0|0||559eb631ca4787b9985d1ccf to shard version 2|0||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.258-0400 m31200| 2015-07-09T13:58:10.258-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb631ca4787b9985d1ccf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.259-0400 m31200| 2015-07-09T13:58:10.258-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.260-0400 m31200| 2015-07-09T13:58:10.259-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:10.259-0400-559eb632d5a107a5b9c0db2d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436464690259), what: "split", ns: "db32.coll32", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb631ca4787b9985d1ccf') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb631ca4787b9985d1ccf') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.315-0400 m31200| 2015-07-09T13:58:10.314-0400 I SHARDING [conn84] distributed lock 'db32.coll32/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.317-0400 m30999| 2015-07-09T13:58:10.317-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db32.coll32: 0ms sequenceNumber: 149 version: 2|5||559eb631ca4787b9985d1ccf based on: 2|3||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.327-0400 m31100| 2015-07-09T13:58:10.327-0400 I INDEX [conn52] build index on: db32.coll32 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.327-0400 m31100| 2015-07-09T13:58:10.327-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.328-0400 m31200| 2015-07-09T13:58:10.327-0400 I INDEX [conn32] build index on: db32.coll32 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.328-0400 m31200| 2015-07-09T13:58:10.327-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.332-0400 m31200| 2015-07-09T13:58:10.331-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.333-0400 m31100| 2015-07-09T13:58:10.332-0400 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.335-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.393-0400 m31101| 2015-07-09T13:58:10.391-0400 I INDEX [repl writer worker 13] build index on: db32.coll32 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.393-0400 m31101| 2015-07-09T13:58:10.391-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.403-0400 m31201| 2015-07-09T13:58:10.396-0400 I INDEX [repl writer worker 11] build index on: db32.coll32 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.406-0400 m31201| 2015-07-09T13:58:10.396-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.410-0400 m31102| 2015-07-09T13:58:10.404-0400 I INDEX [repl writer worker 12] build index on: db32.coll32 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.410-0400 m31102| 2015-07-09T13:58:10.404-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.410-0400 m31202| 2015-07-09T13:58:10.407-0400 I INDEX [repl writer worker 5] build index on: db32.coll32 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db32.coll32" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.411-0400 m31202| 2015-07-09T13:58:10.407-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.434-0400 m31202| 2015-07-09T13:58:10.430-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.440-0400 m31201| 2015-07-09T13:58:10.438-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.502-0400 m31101| 2015-07-09T13:58:10.501-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.512-0400 m31102| 2015-07-09T13:58:10.511-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.546-0400 m30998| 2015-07-09T13:58:10.545-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63292 #197 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.557-0400 m30998| 2015-07-09T13:58:10.556-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63293 #198 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.588-0400 m30998| 2015-07-09T13:58:10.587-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63294 #199 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.588-0400 m30999| 2015-07-09T13:58:10.588-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63295 #197 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.599-0400 m30999| 2015-07-09T13:58:10.598-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63296 #198 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.602-0400 m30998| 2015-07-09T13:58:10.601-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63298 #200 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.604-0400 m30998| 2015-07-09T13:58:10.603-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63299 #201 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.604-0400 m30999| 2015-07-09T13:58:10.603-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63297 #199 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.612-0400 m30998| 2015-07-09T13:58:10.611-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63300 #202 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.612-0400 m30998| 2015-07-09T13:58:10.612-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63301 #203 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.613-0400 m30999| 2015-07-09T13:58:10.612-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63302 #200 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.618-0400 m30999| 2015-07-09T13:58:10.618-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63303 #201 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.619-0400 m30999| 2015-07-09T13:58:10.619-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63304 #202 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.619-0400 m30998| 2015-07-09T13:58:10.619-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63305 #204 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.620-0400 m30999| 2015-07-09T13:58:10.620-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63306 #203 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.620-0400 m30998| 2015-07-09T13:58:10.620-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63307 #205 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.625-0400 m30998| 2015-07-09T13:58:10.625-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63308 #206 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.626-0400 m30999| 2015-07-09T13:58:10.625-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63309 #204 (9 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.626-0400 m30999| 2015-07-09T13:58:10.626-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63310 #205 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.628-0400 m30999| 2015-07-09T13:58:10.628-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63311 #206 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.638-0400 setting random seed: 7020283360034 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.638-0400 setting random seed: 6740200095809 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.639-0400 setting random seed: 6378592280671 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.639-0400 setting random seed: 6048496798612 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.641-0400 setting random seed: 2543063028715 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.642-0400 setting random seed: 3591044666245 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.643-0400 setting random seed: 5404534102417 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.645-0400 setting random seed: 1110441721975 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.647-0400 setting random seed: 3194029936566 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.655-0400 m30998| 2015-07-09T13:58:10.647-0400 I SHARDING [conn198] ChunkManager: time to load chunks for db32.coll32: 0ms sequenceNumber: 37 version: 2|5||559eb631ca4787b9985d1ccf based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.659-0400 setting random seed: 1260343524627 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.661-0400 setting random seed: 9902487033978 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.663-0400 setting random seed: 4687733692117 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.663-0400 setting random seed: 1944787795655 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.664-0400 setting random seed: 5000511156395 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.670-0400 setting random seed: 7085741609334 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.680-0400 setting random seed: 1099657570011 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.681-0400 setting random seed: 59283548034 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.683-0400 setting random seed: 2061168877407 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.693-0400 setting random seed: 4004701627418 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:10.738-0400 setting random seed: 2593628275208 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.176-0400 m30998| 2015-07-09T13:58:11.175-0400 I NETWORK [conn197] end connection 127.0.0.1:63292 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.277-0400 m30999| 2015-07-09T13:58:11.276-0400 I NETWORK [conn198] end connection 127.0.0.1:63296 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.278-0400 m30999| 2015-07-09T13:58:11.278-0400 I NETWORK [conn197] end connection 127.0.0.1:63295 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.289-0400 m30998| 2015-07-09T13:58:11.289-0400 I NETWORK [conn199] end connection 127.0.0.1:63294 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.290-0400 m30999| 2015-07-09T13:58:11.289-0400 I NETWORK 
[conn199] end connection 127.0.0.1:63297 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.324-0400 m30999| 2015-07-09T13:58:11.324-0400 I NETWORK [conn203] end connection 127.0.0.1:63306 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.332-0400 m30998| 2015-07-09T13:58:11.332-0400 I NETWORK [conn203] end connection 127.0.0.1:63301 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.336-0400 m30998| 2015-07-09T13:58:11.335-0400 I NETWORK [conn201] end connection 127.0.0.1:63299 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.336-0400 m30998| 2015-07-09T13:58:11.336-0400 I NETWORK [conn198] end connection 127.0.0.1:63293 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.342-0400 m30999| 2015-07-09T13:58:11.341-0400 I NETWORK [conn200] end connection 127.0.0.1:63302 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.345-0400 m30998| 2015-07-09T13:58:11.345-0400 I NETWORK [conn205] end connection 127.0.0.1:63307 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.377-0400 m30998| 2015-07-09T13:58:11.377-0400 I NETWORK [conn202] end connection 127.0.0.1:63300 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.393-0400 m30998| 2015-07-09T13:58:11.392-0400 I NETWORK [conn200] end connection 127.0.0.1:63298 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.412-0400 m30999| 2015-07-09T13:58:11.412-0400 I NETWORK [conn201] end connection 127.0.0.1:63303 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.429-0400 m30998| 2015-07-09T13:58:11.429-0400 I NETWORK [conn206] end connection 127.0.0.1:63308 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.436-0400 m30999| 2015-07-09T13:58:11.436-0400 I NETWORK [conn202] end connection 127.0.0.1:63304 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.458-0400 m30998| 2015-07-09T13:58:11.458-0400 I NETWORK [conn204] end connection 127.0.0.1:63305 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.478-0400 m30999| 2015-07-09T13:58:11.478-0400 I NETWORK [conn206] end connection 127.0.0.1:63311 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.486-0400 m30999| 2015-07-09T13:58:11.486-0400 I NETWORK [conn205] end connection 127.0.0.1:63310 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.491-0400 m30999| 2015-07-09T13:58:11.491-0400 I NETWORK [conn204] end connection 127.0.0.1:63309 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.508-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 jstests/concurrency/fsm_workloads/indexed_insert_multikey.js: Workload completed in 1174 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 m30999| 2015-07-09T13:58:11.509-0400 I COMMAND [conn1] DROP: db32.coll32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.509-0400 m30999| 
2015-07-09T13:58:11.509-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:11.509-0400-559eb633ca4787b9985d1cd1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464691509), what: "dropCollection.start", ns: "db32.coll32", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.566-0400 m30999| 2015-07-09T13:58:11.565-0400 I SHARDING [conn1] distributed lock 'db32.coll32/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb633ca4787b9985d1cd2 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.567-0400 m31100| 2015-07-09T13:58:11.566-0400 I COMMAND [conn37] CMD: drop db32.coll32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.569-0400 m31200| 2015-07-09T13:58:11.569-0400 I COMMAND [conn84] CMD: drop db32.coll32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.571-0400 m31101| 2015-07-09T13:58:11.570-0400 I COMMAND [repl writer worker 4] CMD: drop db32.coll32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.571-0400 m31102| 2015-07-09T13:58:11.570-0400 I COMMAND [repl writer worker 8] CMD: drop db32.coll32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.575-0400 m31202| 2015-07-09T13:58:11.574-0400 I COMMAND [repl writer worker 10] CMD: drop db32.coll32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.575-0400 m31201| 2015-07-09T13:58:11.575-0400 I COMMAND [repl writer worker 5] CMD: drop db32.coll32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.628-0400 m31100| 2015-07-09T13:58:11.627-0400 I SHARDING [conn37] remotely refreshing metadata for db32.coll32 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb631ca4787b9985d1ccf, current metadata version is 2|3||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.629-0400 m31100| 2015-07-09T13:58:11.628-0400 W SHARDING [conn37] no chunks found when reloading db32.coll32, previous version was 0|0||559eb631ca4787b9985d1ccf, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.629-0400 m31100| 2015-07-09T13:58:11.629-0400 I SHARDING [conn37] dropping metadata for db32.coll32 at shard version 2|3||559eb631ca4787b9985d1ccf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.630-0400 m31200| 2015-07-09T13:58:11.630-0400 I SHARDING [conn84] remotely refreshing metadata for db32.coll32 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb631ca4787b9985d1ccf, current metadata version is 2|5||559eb631ca4787b9985d1ccf [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.632-0400 m31200| 2015-07-09T13:58:11.631-0400 W SHARDING [conn84] no chunks found when reloading db32.coll32, previous version was 0|0||559eb631ca4787b9985d1ccf, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.632-0400 m31200| 2015-07-09T13:58:11.632-0400 I SHARDING [conn84] dropping metadata for db32.coll32 at shard version 2|5||559eb631ca4787b9985d1ccf, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.633-0400 m30999| 2015-07-09T13:58:11.632-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:11.632-0400-559eb633ca4787b9985d1cd3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464691632), what: "dropCollection", ns: "db32.coll32", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.687-0400 m30999| 2015-07-09T13:58:11.687-0400 I SHARDING [conn1] distributed lock 
'db32.coll32/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.744-0400 m30999| 2015-07-09T13:58:11.743-0400 I COMMAND [conn1] DROP DATABASE: db32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.744-0400 m30999| 2015-07-09T13:58:11.743-0400 I SHARDING [conn1] DBConfig::dropDatabase: db32 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.744-0400 m30999| 2015-07-09T13:58:11.743-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:11.743-0400-559eb633ca4787b9985d1cd4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464691743), what: "dropDatabase.start", ns: "db32", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.851-0400 m30999| 2015-07-09T13:58:11.850-0400 I SHARDING [conn1] DBConfig::dropDatabase: db32 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.851-0400 m31100| 2015-07-09T13:58:11.850-0400 I COMMAND [conn28] dropDatabase db32 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.851-0400 m31100| 2015-07-09T13:58:11.851-0400 I COMMAND [conn28] dropDatabase db32 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.852-0400 m30999| 2015-07-09T13:58:11.851-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:11.851-0400-559eb633ca4787b9985d1cd5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464691851), what: "dropDatabase", ns: "db32", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.852-0400 m31102| 2015-07-09T13:58:11.852-0400 I COMMAND [repl writer worker 4] dropDatabase db32 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.853-0400 m31102| 2015-07-09T13:58:11.852-0400 I COMMAND [repl writer worker 4] dropDatabase db32 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.853-0400 m31101| 2015-07-09T13:58:11.852-0400 I COMMAND [repl writer worker 15] dropDatabase db32 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.853-0400 m31101| 2015-07-09T13:58:11.852-0400 I COMMAND [repl writer worker 15] dropDatabase db32 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.939-0400 m31100| 2015-07-09T13:58:11.939-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.941-0400 m31101| 2015-07-09T13:58:11.941-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.943-0400 m31102| 2015-07-09T13:58:11.943-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.983-0400 m31200| 2015-07-09T13:58:11.982-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.985-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.985-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.985-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.985-0400 jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.985-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.985-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.986-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.987-0400 m31201| 2015-07-09T13:58:11.986-0400 I COMMAND [repl writer worker 14] CMD: drop 
test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.988-0400 m31202| 2015-07-09T13:58:11.987-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.991-0400 m30999| 2015-07-09T13:58:11.991-0400 I SHARDING [conn1] distributed lock 'db33/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb633ca4787b9985d1cd6 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.995-0400 m30999| 2015-07-09T13:58:11.995-0400 I SHARDING [conn1] Placing [db33] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:11.995-0400 m30999| 2015-07-09T13:58:11.995-0400 I SHARDING [conn1] Enabling sharding for database [db33] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.049-0400 m30999| 2015-07-09T13:58:12.049-0400 I SHARDING [conn1] distributed lock 'db33/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.071-0400 m31100| 2015-07-09T13:58:12.070-0400 I INDEX [conn68] build index on: db33.coll33 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db33.coll33" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.071-0400 m31100| 2015-07-09T13:58:12.070-0400 I INDEX [conn68] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.085-0400 m31100| 2015-07-09T13:58:12.085-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.087-0400 m30999| 2015-07-09T13:58:12.086-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db33.coll33", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.090-0400 m30999| 2015-07-09T13:58:12.089-0400 I SHARDING [conn1] distributed lock 'db33.coll33/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb634ca4787b9985d1cd7 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.091-0400 m30999| 2015-07-09T13:58:12.091-0400 I SHARDING [conn1] enable sharding on: db33.coll33 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.092-0400 m30999| 2015-07-09T13:58:12.091-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.091-0400-559eb634ca4787b9985d1cd8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464692091), what: "shardCollection.start", ns: "db33.coll33", details: { shardKey: { _id: "hashed" }, collection: "db33.coll33", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.094-0400 m31101| 2015-07-09T13:58:12.094-0400 I INDEX [repl writer worker 12] build index on: db33.coll33 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db33.coll33" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.094-0400 m31101| 2015-07-09T13:58:12.094-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.103-0400 m31101| 2015-07-09T13:58:12.103-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.103-0400 m31102| 2015-07-09T13:58:12.103-0400 I INDEX [repl writer worker 13] build index on: db33.coll33 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db33.coll33" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.104-0400 m31102| 2015-07-09T13:58:12.103-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.107-0400 m31102| 2015-07-09T13:58:12.107-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.144-0400 m30999| 2015-07-09T13:58:12.144-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db33.coll33 using new epoch 559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.250-0400 m30999| 2015-07-09T13:58:12.249-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db33.coll33: 0ms sequenceNumber: 150 version: 1|1||559eb634ca4787b9985d1cd9 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.306-0400 m30999| 2015-07-09T13:58:12.305-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db33.coll33: 0ms sequenceNumber: 151 version: 1|1||559eb634ca4787b9985d1cd9 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.307-0400 m31100| 2015-07-09T13:58:12.307-0400 I SHARDING [conn20] remotely refreshing metadata for db33.coll33 with requested shard version 1|1||559eb634ca4787b9985d1cd9, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.309-0400 m31100| 2015-07-09T13:58:12.309-0400 I SHARDING [conn20] collection db33.coll33 was previously unsharded, new metadata loaded with shard version 1|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.309-0400 m31100| 2015-07-09T13:58:12.309-0400 I SHARDING [conn20] collection version was loaded at version 1|1||559eb634ca4787b9985d1cd9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.310-0400 m30999| 2015-07-09T13:58:12.309-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.309-0400-559eb634ca4787b9985d1cda", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464692309), what: "shardCollection", ns: "db33.coll33", details: { version: "1|1||559eb634ca4787b9985d1cd9" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.365-0400 m30999| 2015-07-09T13:58:12.364-0400 I SHARDING [conn1] distributed lock 'db33.coll33/bs-osx108-8:30999:1436464534:16807' unlocked. 
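----
Editor's note: the db33 setup logged above (placing the database on test-rs0, enabling sharding, then sharding coll33 on a hashed _id key with two initial chunks) corresponds to the following shell commands. This is a minimal sketch, assuming a connection to one of the mongos routers from the log (port 30999); the FSM test harness issues the equivalent commands internally.

    // Connect to a mongos and shard the workload collection on a hashed key.
    var mongos = new Mongo("bs-osx108-8:30999");
    var admin = mongos.getDB("admin");
    admin.runCommand({ enableSharding: "db33" });        // "Enabling sharding for database [db33]"
    admin.runCommand({ shardCollection: "db33.coll33",
                       key: { _id: "hashed" } });        // "CMD: shardcollection: ..."
    // With a hashed shard key, mongos pre-creates the chunks ("going to
    // create 2 chunk(s)") and the upper chunk is then migrated from
    // test-rs0 to test-rs1, which is the moveChunk that follows.
----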
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.366-0400 m30999| 2015-07-09T13:58:12.366-0400 I SHARDING [conn1] moving chunk ns: db33.coll33 moving ( ns: db33.coll33, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.366-0400 m31100| 2015-07-09T13:58:12.366-0400 I SHARDING [conn37] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.368-0400 m31100| 2015-07-09T13:58:12.367-0400 I SHARDING [conn37] received moveChunk request: { moveChunk: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb634ca4787b9985d1cd9') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.371-0400 m31100| 2015-07-09T13:58:12.371-0400 I SHARDING [conn37] distributed lock 'db33.coll33/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb634792e00bb6727497e [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.371-0400 m31100| 2015-07-09T13:58:12.371-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.371-0400-559eb634792e00bb6727497f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464692371), what: "moveChunk.start", ns: "db33.coll33", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.424-0400 m31100| 2015-07-09T13:58:12.423-0400 I SHARDING [conn37] remotely refreshing metadata for db33.coll33 based on current shard version 1|1||559eb634ca4787b9985d1cd9, current metadata version is 1|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.425-0400 m31100| 2015-07-09T13:58:12.425-0400 I SHARDING [conn37] metadata of collection db33.coll33 already up to date (shard version : 1|1||559eb634ca4787b9985d1cd9, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.425-0400 m31100| 2015-07-09T13:58:12.425-0400 I SHARDING [conn37] moveChunk request accepted at version 1|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.426-0400 m31100| 2015-07-09T13:58:12.425-0400 I SHARDING [conn37] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.426-0400 m31200| 2015-07-09T13:58:12.426-0400 I SHARDING [conn16] remotely refreshing metadata for db33.coll33, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.428-0400 m31200| 2015-07-09T13:58:12.427-0400 I SHARDING [conn16] collection db33.coll33 was previously unsharded, new metadata loaded with shard version 0|0||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.428-0400 m31200| 2015-07-09T13:58:12.428-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb634ca4787b9985d1cd9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.428-0400 m31200| 2015-07-09T13:58:12.428-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db33.coll33 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.430-0400 m31100| 2015-07-09T13:58:12.430-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.434-0400 m31100| 2015-07-09T13:58:12.433-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.439-0400 m31100| 2015-07-09T13:58:12.438-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.442-0400 m31200| 2015-07-09T13:58:12.441-0400 I INDEX [migrateThread] build index on: db33.coll33 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db33.coll33" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.442-0400 m31200| 2015-07-09T13:58:12.441-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.449-0400 m31100| 2015-07-09T13:58:12.448-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.452-0400 m31200| 2015-07-09T13:58:12.451-0400 I INDEX [migrateThread] build index on: db33.coll33 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db33.coll33" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.452-0400 m31200| 2015-07-09T13:58:12.451-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.466-0400 m31100| 2015-07-09T13:58:12.465-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.470-0400 m31200| 2015-07-09T13:58:12.469-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.472-0400 m31200| 2015-07-09T13:58:12.472-0400 I SHARDING [migrateThread] Deleter starting delete for: db33.coll33 from { _id: 0 } -> { _id: MaxKey }, with opId: 65952 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.473-0400 m31200| 2015-07-09T13:58:12.473-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db33.coll33 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.483-0400 m31202| 2015-07-09T13:58:12.483-0400 I INDEX [repl writer worker 15] build index on: db33.coll33 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db33.coll33" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.483-0400 m31202| 2015-07-09T13:58:12.483-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.484-0400 m31201| 2015-07-09T13:58:12.483-0400 I INDEX [repl writer worker 15] build index on: db33.coll33 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db33.coll33" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.485-0400 m31201| 2015-07-09T13:58:12.483-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.492-0400 m31201| 2015-07-09T13:58:12.492-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.492-0400 m31202| 2015-07-09T13:58:12.492-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.494-0400 m31200| 2015-07-09T13:58:12.493-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.494-0400 m31200| 2015-07-09T13:58:12.493-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db33.coll33' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.500-0400 m31100| 2015-07-09T13:58:12.499-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.500-0400 m31100| 2015-07-09T13:58:12.499-0400 I SHARDING [conn37] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.500-0400 m31100| 2015-07-09T13:58:12.500-0400 I SHARDING [conn37] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.501-0400 m31100| 2015-07-09T13:58:12.500-0400 I SHARDING [conn37] moveChunk setting version to: 2|0||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.506-0400 m31200| 2015-07-09T13:58:12.505-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db33.coll33' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.506-0400 m31200| 2015-07-09T13:58:12.506-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.506-0400-559eb634d5a107a5b9c0db2e", server: "bs-osx108-8", clientAddr: "", 
time: new Date(1436464692506), what: "moveChunk.to", ns: "db33.coll33", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 43, step 2 of 5: 21, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.560-0400 m31100| 2015-07-09T13:58:12.559-0400 I SHARDING [conn37] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.560-0400 m31100| 2015-07-09T13:58:12.559-0400 I SHARDING [conn37] moveChunk updating self version to: 2|1||559eb634ca4787b9985d1cd9 through { _id: MinKey } -> { _id: 0 } for collection 'db33.coll33' [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.561-0400 m31100| 2015-07-09T13:58:12.560-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.560-0400-559eb634792e00bb67274980", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464692560), what: "moveChunk.commit", ns: "db33.coll33", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.614-0400 m31100| 2015-07-09T13:58:12.614-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.615-0400 m31100| 2015-07-09T13:58:12.614-0400 I SHARDING [conn37] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.615-0400 m31100| 2015-07-09T13:58:12.614-0400 I SHARDING [conn37] Deleter starting delete for: db33.coll33 from { _id: 0 } -> { _id: MaxKey }, with opId: 57424 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.615-0400 m31100| 2015-07-09T13:58:12.614-0400 I SHARDING [conn37] rangeDeleter deleted 0 documents for db33.coll33 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.615-0400 m31100| 2015-07-09T13:58:12.614-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.616-0400 m31100| 2015-07-09T13:58:12.616-0400 I SHARDING [conn37] distributed lock 'db33.coll33/bs-osx108-8:31100:1436464536:197041335' unlocked. 
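----
Editor's note: the donor-side migration traced above (and summarized in the moveChunk.from event that follows) walks the usual moveChunk protocol: the recipient clones the range ("ready"), catches up ("steady"), the donor enters the critical section, commits the new shard version (2|0) to the config servers, and then deletes the moved range inline because the request carried waitForDelete: true. A minimal shell sketch that would trigger such a migration by hand; the { _id: 1 } query value is hypothetical, since any key that routes to the chunk being moved works:

    // Ask mongos to move the chunk owning { _id: 1 } to shard test-rs1.
    sh.moveChunk("db33.coll33", { _id: 1 }, "test-rs1");
    // Equivalent admin command form:
    db.adminCommand({ moveChunk: "db33.coll33",
                      find: { _id: 1 },
                      to: "test-rs1" });
----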
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.617-0400 m31100| 2015-07-09T13:58:12.616-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.616-0400-559eb634792e00bb67274981", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464692616), what: "moveChunk.from", ns: "db33.coll33", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.671-0400 m31100| 2015-07-09T13:58:12.670-0400 I COMMAND [conn37] command db33.coll33 command: moveChunk { moveChunk: "db33.coll33", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb634ca4787b9985d1cd9') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 303ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.672-0400 m30999| 2015-07-09T13:58:12.672-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db33.coll33: 0ms sequenceNumber: 152 version: 2|1||559eb634ca4787b9985d1cd9 based on: 1|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.674-0400 m31100| 2015-07-09T13:58:12.673-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db33.coll33", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb634ca4787b9985d1cd9') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.677-0400 m31100| 2015-07-09T13:58:12.676-0400 I SHARDING [conn37] distributed lock 'db33.coll33/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb634792e00bb67274982 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.677-0400 m31100| 2015-07-09T13:58:12.676-0400 I SHARDING [conn37] remotely refreshing metadata for db33.coll33 based on current shard version 2|0||559eb634ca4787b9985d1cd9, current metadata version is 2|0||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.678-0400 m31100| 2015-07-09T13:58:12.678-0400 I SHARDING [conn37] updating metadata for db33.coll33 from shard version 2|0||559eb634ca4787b9985d1cd9 to shard version 2|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.678-0400 m31100| 2015-07-09T13:58:12.678-0400 I SHARDING [conn37] collection version was loaded at version 2|1||559eb634ca4787b9985d1cd9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.678-0400 m31100| 2015-07-09T13:58:12.678-0400 I SHARDING [conn37] splitChunk accepted at version 2|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.680-0400 m31100| 2015-07-09T13:58:12.679-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.679-0400-559eb634792e00bb67274983", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new 
Date(1436464692679), what: "split", ns: "db33.coll33", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb634ca4787b9985d1cd9') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb634ca4787b9985d1cd9') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.734-0400 m31100| 2015-07-09T13:58:12.733-0400 I SHARDING [conn37] distributed lock 'db33.coll33/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.736-0400 m30999| 2015-07-09T13:58:12.735-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db33.coll33: 0ms sequenceNumber: 153 version: 2|3||559eb634ca4787b9985d1cd9 based on: 2|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.737-0400 m31200| 2015-07-09T13:58:12.736-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db33.coll33", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb634ca4787b9985d1cd9') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.741-0400 m31200| 2015-07-09T13:58:12.740-0400 I SHARDING [conn84] distributed lock 'db33.coll33/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb634d5a107a5b9c0db2f [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.741-0400 m31200| 2015-07-09T13:58:12.741-0400 I SHARDING [conn84] remotely refreshing metadata for db33.coll33 based on current shard version 0|0||559eb634ca4787b9985d1cd9, current metadata version is 1|1||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.742-0400 m31200| 2015-07-09T13:58:12.742-0400 I SHARDING [conn84] updating metadata for db33.coll33 from shard version 0|0||559eb634ca4787b9985d1cd9 to shard version 2|0||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.743-0400 m31200| 2015-07-09T13:58:12.742-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb634ca4787b9985d1cd9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.743-0400 m31200| 2015-07-09T13:58:12.742-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.744-0400 m31200| 2015-07-09T13:58:12.743-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:12.743-0400-559eb634d5a107a5b9c0db30", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436464692743), what: "split", ns: "db33.coll33", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb634ca4787b9985d1cd9') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb634ca4787b9985d1cd9') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.797-0400 m31200| 2015-07-09T13:58:12.797-0400 I SHARDING [conn84] distributed lock 'db33.coll33/bs-osx108-8:31200:1436464537:809424560' unlocked. 
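----
Editor's note: each shard then splits its half of the hashed keyspace at roughly +/-2^62 (the logged splitKeys, +/-4611686018427387902, sit at the midpoints of the two initial chunks of the signed 64-bit hash range), leaving four chunks at version 2|5, as the ChunkManager reload below shows. A minimal sketch of the equivalent manual split run against a mongos; NumberLong keeps the 64-bit split value exact in the shell:

    // Split the upper chunk of db33.coll33 at the logged hashed-key midpoint.
    sh.splitAt("db33.coll33", { _id: NumberLong("4611686018427387902") });
----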
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.799-0400 m30999| 2015-07-09T13:58:12.799-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db33.coll33: 0ms sequenceNumber: 154 version: 2|5||559eb634ca4787b9985d1cd9 based on: 2|3||559eb634ca4787b9985d1cd9
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.808-0400 m31100| 2015-07-09T13:58:12.806-0400 I INDEX [conn20] build index on: db33.coll33 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db33.coll33", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.808-0400 m31100| 2015-07-09T13:58:12.806-0400 I INDEX [conn20] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.809-0400 m31200| 2015-07-09T13:58:12.806-0400 I INDEX [conn19] build index on: db33.coll33 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db33.coll33", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.809-0400 m31200| 2015-07-09T13:58:12.807-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.812-0400 m31200| 2015-07-09T13:58:12.812-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.814-0400 m31100| 2015-07-09T13:58:12.813-0400 I INDEX [conn20] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.814-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.916-0400 m31202| 2015-07-09T13:58:12.915-0400 I INDEX [repl writer worker 12] build index on: db33.coll33 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db33.coll33", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.916-0400 m31202| 2015-07-09T13:58:12.915-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.922-0400 m31102| 2015-07-09T13:58:12.922-0400 I INDEX [repl writer worker 15] build index on: db33.coll33 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db33.coll33", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.922-0400 m31102| 2015-07-09T13:58:12.922-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.924-0400 m31101| 2015-07-09T13:58:12.924-0400 I INDEX [repl writer worker 2] build index on: db33.coll33 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db33.coll33", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.924-0400 m31101| 2015-07-09T13:58:12.924-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.934-0400 m31201| 2015-07-09T13:58:12.925-0400 I INDEX [repl writer worker 2] build index on: db33.coll33 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "indexed_insert_text_text", ns: "db33.coll33", weights: { indexed_insert_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.934-0400 m31201| 2015-07-09T13:58:12.926-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.965-0400 m31101| 2015-07-09T13:58:12.964-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.966-0400 m31102| 2015-07-09T13:58:12.965-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.976-0400 m31201| 2015-07-09T13:58:12.969-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:12.976-0400 m31202| 2015-07-09T13:58:12.973-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.024-0400 m30999| 2015-07-09T13:58:13.023-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63312 #207 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.072-0400 m30998| 2015-07-09T13:58:13.072-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63313 #207 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.086-0400 m30998| 2015-07-09T13:58:13.085-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63314 #208 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.095-0400 m30999| 2015-07-09T13:58:13.095-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63315 #208 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.103-0400 m30999| 2015-07-09T13:58:13.103-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63316 #209 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.103-0400 m30998| 2015-07-09T13:58:13.103-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63317 #209 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.103-0400 m30999| 2015-07-09T13:58:13.103-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63319 #210 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.106-0400 m30998| 2015-07-09T13:58:13.105-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63318 #210 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.110-0400 m30998| 2015-07-09T13:58:13.109-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63323 #211 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.111-0400 m30999| 2015-07-09T13:58:13.111-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63320 #211 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.115-0400 m30998| 2015-07-09T13:58:13.115-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63324 #212 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.115-0400 m30998| 2015-07-09T13:58:13.115-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63325 #213 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.116-0400 m30998| 2015-07-09T13:58:13.116-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63326 #214 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.121-0400 m30999| 2015-07-09T13:58:13.121-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63321 #212 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.122-0400 m30998| 2015-07-09T13:58:13.122-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63328 #215 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.125-0400 m30999| 2015-07-09T13:58:13.123-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63322 #213 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.127-0400 m30999| 2015-07-09T13:58:13.127-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63327 #214 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.132-0400 m30998| 2015-07-09T13:58:13.130-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63330 #216 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.132-0400 m30999| 2015-07-09T13:58:13.132-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63329 #215 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.132-0400 m30999| 2015-07-09T13:58:13.132-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63331 #216 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.142-0400 setting random seed: 9550724970176
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.143-0400 setting random seed: 8840221674181
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.143-0400 setting random seed: 5320409024134
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.143-0400 setting random seed: 9978586542420
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.144-0400 setting random seed: 104096550494
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.146-0400 setting random seed: 1441592206247
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.151-0400 setting random seed: 114510264247
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.154-0400 m30998| 2015-07-09T13:58:13.154-0400 I SHARDING [conn213] ChunkManager: time to load chunks for db33.coll33: 0ms sequenceNumber: 38 version: 2|5||559eb634ca4787b9985d1cd9 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.158-0400 setting random seed: 9792445199564
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.177-0400 setting random seed: 948700853623
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.184-0400 setting random seed: 7605747152119
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.186-0400 setting random seed: 1626545712351
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.186-0400 setting random seed: 9034648505039
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.187-0400 setting random seed: 2065338725224
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.188-0400 setting random seed: 4815254271961
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.193-0400 setting random seed: 7353800223208
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.215-0400 setting random seed: 5337494905106
[js_test:fsm_all_sharded_replication]
2015-07-09T13:58:13.232-0400 setting random seed: 2147971005178 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.235-0400 setting random seed: 4574012104421 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.245-0400 setting random seed: 3058335310779 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.263-0400 setting random seed: 7276911898516 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.591-0400 m31100| 2015-07-09T13:58:13.590-0400 I QUERY [conn71] query db33.coll33 query: { $text: { $search: "Pluggable storage engines are first-class players in the MongoDB operations, migrating to the WiredTiger storage engine will workloads with a simple v..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:268 nscannedObjects:57 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:57 reslen:16389 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.735-0400 m31200| 2015-07-09T13:58:13.735-0400 I QUERY [conn35] query db33.coll33 query: { $text: { $search: "engines that seamlessly integrate with MongoDB. This opens the door for the" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:127 nscannedObjects:53 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:53 reslen:14255 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.742-0400 m31100| 2015-07-09T13:58:13.741-0400 I QUERY [conn54] query db33.coll33 query: { $text: { $search: "compression rates. For greater compression, at the cost of additional compression rates. For greater compression, at the cost of additional file forma..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:240 nscannedObjects:64 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:64 reslen:18633 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.771-0400 m31200| 2015-07-09T13:58:13.769-0400 I QUERY [conn52] query db33.coll33 query: { $text: { $search: "Pluggable storage engines are first-class players in the MongoDB operations, migrating to the WiredTiger storage engine will workloads with a simple v..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:237 nscannedObjects:61 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:61 reslen:15016 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 167ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.823-0400 m31100| 2015-07-09T13:58:13.822-0400 I QUERY [conn73] query db33.coll33 query: { $text: { $search: "2.8 release candidate (rc0), headlined by improved concurrency (including" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:200 nscannedObjects:60 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:60 reslen:17291 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } 227ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.838-0400 m31100| 2015-07-09T13:58:13.837-0400 I QUERY [conn49] query db33.coll33 query: { $text: { $search: 
"Pluggable storage engines are first-class players in the MongoDB Improved Concurrency on-disk compression, reducing disk I/O and storage footprint by ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:390 nscannedObjects:89 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:89 reslen:25054 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.869-0400 m31200| 2015-07-09T13:58:13.869-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63332 #137 (80 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.891-0400 m31200| 2015-07-09T13:58:13.885-0400 I QUERY [conn79] query db33.coll33 query: { $text: { $search: "the coming weeks optimizing and tuning some of the new features. Now it’s workloads with a simple version upgrade. For highly concurrent" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:175 nscannedObjects:66 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:66 reslen:17847 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.895-0400 m31100| 2015-07-09T13:58:13.894-0400 I QUERY [conn56] query db33.coll33 query: { $text: { $search: "specific workloads, hardware optimizations, or deployment architectures. engines that seamlessly integrate with MongoDB. This opens the door for the a..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:235 nscannedObjects:81 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:81 reslen:23841 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.988-0400 m31100| 2015-07-09T13:58:13.983-0400 I QUERY [conn57] query db33.coll33 query: { $text: { $search: "We’re truly excited to announce the availability of the first MongoDB file formats, and optionally, compression. WiredTiger is key to MongoDB 2.8 in..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2342369517604 ntoreturn:0 ntoskip:0 nscanned:542 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:28071 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:13.993-0400 m31100| 2015-07-09T13:58:13.992-0400 I QUERY [conn54] query db33.coll33 query: { $text: { $search: "make up the 2.8 release. We will begin today with our three headliners: Improved Concurrency in greater utilization of available hardware resources, a..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:448 nscannedObjects:99 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:99 reslen:27967 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.010-0400 m30998| 2015-07-09T13:58:14.008-0400 I NETWORK [conn210] end connection 127.0.0.1:63318 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.011-0400 m31100| 2015-07-09T13:58:14.008-0400 I QUERY [conn48] query db33.coll33 query: { $text: { $search: "the coming weeks optimizing and tuning some of the new features. Now it’s workloads with a simple version upgrade. For highly concurrent" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:226 nscannedObjects:88 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:88 reslen:25766 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 273ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.033-0400 m31200| 2015-07-09T13:58:14.032-0400 I QUERY [conn35] query db33.coll33 query: { $text: { $search: "fully utilize available hardware resources. So whereas CPU make up the 2.8 release. We will begin today with our three headliners: compression rates. ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:554 nscannedObjects:98 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:98 reslen:25512 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.040-0400 m31100| 2015-07-09T13:58:14.039-0400 I QUERY [conn59] query db33.coll33 query: { $text: { $search: "use the pluggable storage API. Our original storage engine, now named Prior to 2.8, MongoDB’s concurrency model supported database by participating ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:476 nscannedObjects:97 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:97 reslen:27555 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 213ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.092-0400 m31100| 2015-07-09T13:58:14.091-0400 I QUERY [conn45] query db33.coll33 query: { $text: { $search: "Improved Concurrency the new WiredTiger storage engine, and brings collection-level now correspond more directly to system throughput." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2342296785152 ntoreturn:0 ntoskip:0 nscanned:424 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:28722 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.106-0400 m31100| 2015-07-09T13:58:14.104-0400 I QUERY [conn58] query db33.coll33 query: { $text: { $search: "great prizes (details below). MongoDB 2.8 RC0 The new pluggable storage API allows external parties to build custom storage of modern, multi-core ser..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:440 nscannedObjects:87 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:87 reslen:24858 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 389ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.117-0400 m31200| 2015-07-09T13:58:14.116-0400 I QUERY [conn33] query db33.coll33 query: { $text: { $search: "Prior to 2.8, MongoDB’s concurrency model supported database on-disk compression, reducing disk I/O and storage footprint by" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:341 nscannedObjects:99 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:99 reslen:26338 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.167-0400 m31100| 2015-07-09T13:58:14.166-0400 I QUERY 
[conn59] query db33.coll33 query: { $text: { $search: "dramatically improve throughput and performance. operations, migrating to the WiredTiger storage engine will storage engine, WiredTiger, that fulfills..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2342862844834 ntoreturn:0 ntoskip:0 nscanned:437 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27920 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.202-0400 m31100| 2015-07-09T13:58:14.200-0400 I QUERY [conn55] query db33.coll33 query: { $text: { $search: "Pluggable storage engines are first-class players in the MongoDB next three weeks, we challenge you to test and uncover any lingering issues compressi..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2342676628272 ntoreturn:0 ntoskip:0 nscanned:463 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:28286 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 217ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.212-0400 m31100| 2015-07-09T13:58:14.211-0400 I QUERY [conn49] query db33.coll33 query: { $text: { $search: "the coming weeks optimizing and tuning some of the new features. Now it’s storage engine, WiredTiger, that fulfills our desire to make MongoDB Prior..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2342101477354 ntoreturn:0 ntoskip:0 nscanned:614 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27452 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 105ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.243-0400 m31200| 2015-07-09T13:58:14.239-0400 I QUERY [conn35] query db33.coll33 query: { $text: { $search: "Improved Concurrency compression, which provides a good compromise between speed and" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:171 nscannedObjects:85 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:85 reslen:24034 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.261-0400 m31200| 2015-07-09T13:58:14.259-0400 I QUERY [conn80] query db33.coll33 query: { $text: { $search: "to the WiredTiger storage engine, please see the 2.8 Release make up the 2.8 release. We will begin today with our three headliners: index, so users c..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307941444360 ntoreturn:0 ntoskip:0 nscanned:696 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:25410 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.271-0400 m31200| 2015-07-09T13:58:14.269-0400 I QUERY [conn83] query db33.coll33 query: { $text: { $search: "MongoDB 2.8 includes significant improvements to concurrency, resulting MongoDB Community to develop a wide array of storage engines designed for" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307247216523 ntoreturn:0 ntoskip:0 nscanned:521 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:26599 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 259ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.273-0400 m31200| 2015-07-09T13:58:14.271-0400 I QUERY [conn38] query db33.coll33 query: { $text: { $search: "the coming weeks optimizing and tuning some of the new features. Now it’s storage engine, WiredTiger, that fulfills our desire to make MongoDB Prior..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2306589787749 ntoreturn:0 ntoskip:0 nscanned:626 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:26030 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 169ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.276-0400 m31200| 2015-07-09T13:58:14.272-0400 I QUERY [conn28] query db33.coll33 query: { $text: { $search: "We’re truly excited to announce the availability of the first MongoDB now correspond more directly to system throughput. now correspond more directl..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:243 nscannedObjects:100 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:100 reslen:27876 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.281-0400 m30999| 2015-07-09T13:58:14.281-0400 I NETWORK [conn208] end connection 127.0.0.1:63315 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.289-0400 m31200| 2015-07-09T13:58:14.289-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63333 #138 (81 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.305-0400 m31200| 2015-07-09T13:58:14.303-0400 I QUERY [conn39] query db33.coll33 query: { $text: { $search: "to the WiredTiger storage engine, please see the 2.8 Release CPU utilization, you can switch to zlib compression. CPU utilization, you can switch to z..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2308180653032 ntoreturn:0 ntoskip:0 nscanned:454 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:26181 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 117ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.312-0400 m31200| 2015-07-09T13:58:14.310-0400 I QUERY [conn19] query db33.coll33 query: { $text: { $search: "The improved concurrency also means that MongoDB will more usage in MongoDB has been traditionally fairly low, it will" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2308280484745 ntoreturn:0 ntoskip:0 nscanned:254 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:28385 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.312-0400 m31200| 2015-07-09T13:58:14.310-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63334 #139 (82 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.314-0400 m31200| 2015-07-09T13:58:14.310-0400 I QUERY [conn37] query db33.coll33 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB use cases, where writing makes up a significant portion of now correspond more di..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2308069013433 ntoreturn:0 ntoskip:0 nscanned:457 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:26277 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.329-0400 m31200| 2015-07-09T13:58:14.328-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63335 #140 (83 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.382-0400 m31200| 2015-07-09T13:58:14.382-0400 I QUERY [conn35] query db33.coll33 query: { $text: { $search: "level locking. 
MongoDB 2.8 introduces document-level locking with by participating in our MongoDB 2.8 Bug Hunt. Winners are entitled to some now corre..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307539810701 ntoreturn:0 ntoskip:0 nscanned:396 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27251 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.403-0400 m31200| 2015-07-09T13:58:14.401-0400 I QUERY [conn28] query db33.coll33 query: { $text: { $search: "throughput for write-heavy workloads, including those that mix reading delivering the other two features we’re highlighting today. file formats, and..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2306509476842 ntoreturn:0 ntoskip:0 nscanned:495 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:24878 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.424-0400 m31100| 2015-07-09T13:58:14.418-0400 I COMMAND [conn146] command db33.$cmd command: insert { insert: "coll33", documents: [ { _id: ObjectId('559eb636eac5440bf8d2ad63'), indexed_insert_text: [ "in greater utilization of available hardware resources, and vastly better" ] } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eb634ca4787b9985d1cd9') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.432-0400 m31100| 2015-07-09T13:58:14.431-0400 I QUERY [conn54] query db33.coll33 query: { $text: { $search: "document-level locking), compression, and pluggable storage engines. 
Pluggable Storage Engines for their data. In 2.8, WiredTiger compression defaults..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2341361468016 ntoreturn:0 ntoskip:0 nscanned:521 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27639 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.520-0400 m31100| 2015-07-09T13:58:14.516-0400 I QUERY [conn51] query db33.coll33 query: { $text: { $search: "dramatically improve throughput and performance. Compression For more information, including how to seamlessly upgrade in greater utilization of avail..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2341582752993 ntoreturn:0 ntoskip:0 nscanned:338 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27810 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.538-0400 m31200| 2015-07-09T13:58:14.528-0400 I QUERY [conn52] query db33.coll33 query: { $text: { $search: "document-level locking), compression, and pluggable storage engines. Pluggable Storage Engines for their data. In 2.8, WiredTiger compression defaults..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307941066546 ntoreturn:0 ntoskip:0 nscanned:499 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:26227 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 218ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.557-0400 m31100| 2015-07-09T13:58:14.548-0400 I QUERY [conn48] query db33.coll33 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB now correspond more directly to system throughput." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2342410535537 ntoreturn:0 ntoskip:0 nscanned:402 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:101 reslen:28612 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, Collection: { acquireCount: { r: 5 } } } 143ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.585-0400 m31200| 2015-07-09T13:58:14.582-0400 I QUERY [conn38] query db33.coll33 query: { $text: { $search: "ecosystem. MongoDB 2.8 ships with two storage engines, both of which specific workloads, hardware optimizations, or deployment architectures. use the ..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2306564823550 ntoreturn:0 ntoskip:0 nscanned:657 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:25324 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.630-0400 m31200| 2015-07-09T13:58:14.629-0400 I QUERY [conn137] query db33.coll33 query: { $text: { $search: "compression rates. 
For greater compression, at the cost of additional throughput for write-heavy workloads, including those that mix reading" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2306738918505 ntoreturn:0 ntoskip:0 nscanned:281 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27189 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.631-0400 m30998| 2015-07-09T13:58:14.631-0400 I NETWORK [conn212] end connection 127.0.0.1:63324 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.632-0400 m31200| 2015-07-09T13:58:14.629-0400 I QUERY [conn79] query db33.coll33 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB now correspond more directly to system throughput." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307248246312 ntoreturn:0 ntoskip:0 nscanned:425 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:26566 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 228ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.633-0400 m30999| 2015-07-09T13:58:14.633-0400 I NETWORK [conn213] end connection 127.0.0.1:63322 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.682-0400 m31200| 2015-07-09T13:58:14.681-0400 I QUERY [conn28] query db33.coll33 query: { $text: { $search: "make up the 2.8 release. We will begin today with our three headliners: the new WiredTiger storage engine, and brings collection-level The WiredTiger ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2308409852724 ntoreturn:0 ntoskip:0 nscanned:820 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24558 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.683-0400 m31200| 2015-07-09T13:58:14.682-0400 I QUERY [conn52] query db33.coll33 query: { $text: { $search: "to the WiredTiger storage engine, please see the 2.8 Release 30-80%. Compression is configured individually for each collection and on-disk compressio..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2306543502572 ntoreturn:0 ntoskip:0 nscanned:710 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:23673 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.705-0400 m31200| 2015-07-09T13:58:14.704-0400 I QUERY [conn19] query db33.coll33 query: { $text: { $search: "of modern, multi-core servers with access to large amounts of The WiredTiger storage engine in MongoDB 2.8 provides storage engine, WiredTiger, that f..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307573568695 ntoreturn:0 ntoskip:0 nscanned:527 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:26235 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 183ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.724-0400 m31200| 2015-07-09T13:58:14.718-0400 I QUERY [conn38] query db33.coll33 query: { $text: { $search: "index, so users can choose the compression algorithm most appropriate specific workloads, hardware optimizations, or deployment architectures." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } ntoreturn:0 ntoskip:0 nscanned:173 nscannedObjects:99 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:99 reslen:27570 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.732-0400 m31200| 2015-07-09T13:58:14.730-0400 I QUERY [conn32] query db33.coll33 query: { $text: { $search: "engines that seamlessly integrate with MongoDB. This opens the door for the on-disk compression, reducing disk I/O and storage footprint by specific w..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2308007346011 ntoreturn:0 ntoskip:0 nscanned:493 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:24219 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 246ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.739-0400 m31200| 2015-07-09T13:58:14.737-0400 I QUERY [conn82] query db33.coll33 query: { $text: { $search: "to the WiredTiger storage engine, please see the 2.8 Release operations, migrating to the WiredTiger storage engine will Compression The new pluggable..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2308338866483 ntoreturn:0 ntoskip:0 nscanned:652 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:24959 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 209ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.743-0400 m31200| 2015-07-09T13:58:14.738-0400 I QUERY [conn83] query db33.coll33 query: { $text: { $search: "storage engine, WiredTiger, that fulfills our desire to make MongoDB your turn to help ensure the quality of this important release. Over the In futur..." } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2308449386489 ntoreturn:0 ntoskip:0 nscanned:461 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:26291 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 206ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.746-0400 m31200| 2015-07-09T13:58:14.746-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63337 #141 (84 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.764-0400 m31200| 2015-07-09T13:58:14.764-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63338 #142 (85 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.796-0400 m31200| 2015-07-09T13:58:14.796-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63339 #143 (86 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.861-0400 m30999| 2015-07-09T13:58:14.860-0400 I NETWORK [conn211] end connection 127.0.0.1:63320 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.879-0400 m31200| 2015-07-09T13:58:14.878-0400 I QUERY [conn79] query db33.coll33 query: { $text: { $search: "in greater utilization of available hardware resources, and vastly better achieves high concurrency and low latency by taking full advantage file form..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2306472346392 ntoreturn:0 ntoskip:0 nscanned:855 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:101 reslen:23774 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.902-0400 m30998| 2015-07-09T13:58:14.901-0400 I NETWORK [conn208] end connection 127.0.0.1:63314 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.903-0400 m31100| 2015-07-09T13:58:14.901-0400 I QUERY [conn20] query db33.coll33 query: { $text: { $search: "by participating in our MongoDB 2.8 Bug Hunt. Winners are entitled to some ecosystem. MongoDB 2.8 ships with two storage engines, both of which" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2341783041596 ntoreturn:0 ntoskip:0 nscanned:481 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:101 reslen:27425 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.917-0400 m31200| 2015-07-09T13:58:14.911-0400 I QUERY [conn80] query db33.coll33 query: { $text: { $search: "Pluggable Storage Engines throughput for write-heavy workloads, including those that mix reading to the WiredTiger storage engine, please see the 2.8 ..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307115237006 ntoreturn:0 ntoskip:0 nscanned:950 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:101 reslen:24225 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 168ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.934-0400 m31200| 2015-07-09T13:58:14.933-0400 I QUERY [conn35] query db33.coll33 query: { $text: { $search: "by participating in our MongoDB 2.8 Bug Hunt. Winners are entitled to some ecosystem. MongoDB 2.8 ships with two storage engines, both of which" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307199897801 ntoreturn:0 ntoskip:0 nscanned:502 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:101 reslen:27176 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 175ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.971-0400 m30998| 2015-07-09T13:58:14.969-0400 I NETWORK [conn211] end connection 127.0.0.1:63323 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:14.992-0400 m31200| 2015-07-09T13:58:14.991-0400 I QUERY [conn38] query db33.coll33 query: { $text: { $search: "the new WiredTiger storage engine, and brings collection-level Pluggable storage engines are first-class players in the MongoDB index, so users can ch..." 
} } planSummary: IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 }, IXSCAN { _fts: "text", _ftsx: 1 } cursorid:2307392713114 ntoreturn:0 ntoskip:0 nscanned:757 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:25033 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 245ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.017-0400 m30999| 2015-07-09T13:58:15.015-0400 I NETWORK [conn214] end connection 127.0.0.1:63327 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.052-0400 m30999| 2015-07-09T13:58:15.049-0400 I NETWORK [conn210] end connection 127.0.0.1:63319 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.068-0400 m30998| 2015-07-09T13:58:15.067-0400 I NETWORK [conn214] end connection 127.0.0.1:63326 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.114-0400 m30999| 2015-07-09T13:58:15.111-0400 I NETWORK [conn212] end connection 127.0.0.1:63321 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.188-0400 m30999| 2015-07-09T13:58:15.187-0400 I NETWORK [conn207] end connection 127.0.0.1:63312 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.204-0400 m30998| 2015-07-09T13:58:15.200-0400 I NETWORK [conn209] end connection 127.0.0.1:63317 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.246-0400 m30999| 2015-07-09T13:58:15.245-0400 I NETWORK [conn209] end connection 127.0.0.1:63316 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.259-0400 m30998| 2015-07-09T13:58:15.256-0400 I NETWORK [conn207] end connection 127.0.0.1:63313 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.297-0400 m30999| 2015-07-09T13:58:15.296-0400 I NETWORK [conn216] end connection 127.0.0.1:63331 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.318-0400 m30998| 2015-07-09T13:58:15.317-0400 I NETWORK [conn213] end connection 127.0.0.1:63325 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.323-0400 m30999| 2015-07-09T13:58:15.322-0400 I NETWORK [conn215] end connection 127.0.0.1:63329 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.342-0400 m30998| 2015-07-09T13:58:15.342-0400 I NETWORK [conn215] end connection 127.0.0.1:63328 (2 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T13:58:15.376-0400 m30998| 2015-07-09T13:58:15.376-0400 I NETWORK [conn216] end connection 127.0.0.1:63330 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.397-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.397-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.398-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.398-0400 jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js: Workload completed in 2583 ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.398-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.398-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.398-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.398-0400 m30999| 2015-07-09T13:58:15.398-0400 I COMMAND [conn1] DROP: db33.coll33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.398-0400 m30999| 2015-07-09T13:58:15.398-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:15.398-0400-559eb637ca4787b9985d1cdb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464695398), what: "dropCollection.start", ns: "db33.coll33", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.456-0400 m30999| 2015-07-09T13:58:15.455-0400 I SHARDING [conn1] distributed lock 'db33.coll33/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb637ca4787b9985d1cdc [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.457-0400 m31100| 2015-07-09T13:58:15.457-0400 I COMMAND [conn37] CMD: drop db33.coll33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.460-0400 m31200| 2015-07-09T13:58:15.460-0400 I COMMAND [conn84] CMD: drop db33.coll33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.461-0400 m31102| 2015-07-09T13:58:15.461-0400 I COMMAND [repl writer worker 4] CMD: drop db33.coll33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.462-0400 m31101| 2015-07-09T13:58:15.462-0400 I COMMAND [repl writer worker 7] CMD: drop db33.coll33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.467-0400 m31202| 2015-07-09T13:58:15.467-0400 I COMMAND [repl writer worker 2] CMD: drop db33.coll33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.468-0400 m31201| 2015-07-09T13:58:15.467-0400 I COMMAND [repl writer worker 11] CMD: drop db33.coll33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.519-0400 m31100| 2015-07-09T13:58:15.518-0400 I SHARDING [conn37] remotely refreshing metadata for db33.coll33 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb634ca4787b9985d1cd9, current metadata version is 2|3||559eb634ca4787b9985d1cd9 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.520-0400 m31100| 2015-07-09T13:58:15.519-0400 W SHARDING [conn37] no chunks found when reloading db33.coll33, previous version was 0|0||559eb634ca4787b9985d1cd9, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.520-0400 m31100| 2015-07-09T13:58:15.520-0400 I SHARDING [conn37] dropping metadata for db33.coll33 at shard version 2|3||559eb634ca4787b9985d1cd9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.522-0400 m31200| 2015-07-09T13:58:15.521-0400 I SHARDING [conn84] remotely refreshing metadata for db33.coll33 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb634ca4787b9985d1cd9, current metadata version is 2|5||559eb634ca4787b9985d1cd9 
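(Annotation: the slow-query entries above are the $text searches issued by indexed_insert_text_multikey.js, the workload named in the completion summary, against db33.coll33. The repeated IXSCAN { _fts: "text", _ftsx: 1 } stages in each planSummary reflect the TEXT stage scanning the text index once per search term, so the long search strings produce long plan lists. A minimal shell sketch of that kind of query follows; the field name 'indexed_insert_text' is an assumption, not the workload's actual source, and the array value is what makes the index multikey.

    // Hypothetical reproduction of the logged queries; field name assumed.
    var coll = db.getSiblingDB("db33").coll33;
    coll.createIndex({ indexed_insert_text: "text" });            // text index -> { _fts: "text", _ftsx: 1 }
    coll.insert({ indexed_insert_text: ["storage engine", "WiredTiger", "compression"] }); // multikey
    coll.find({ $text: { $search: "storage engine, WiredTiger" } }).itcount();
)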
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.523-0400 m31200| 2015-07-09T13:58:15.522-0400 W SHARDING [conn84] no chunks found when reloading db33.coll33, previous version was 0|0||559eb634ca4787b9985d1cd9, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.523-0400 m31200| 2015-07-09T13:58:15.523-0400 I SHARDING [conn84] dropping metadata for db33.coll33 at shard version 2|5||559eb634ca4787b9985d1cd9, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.524-0400 m30999| 2015-07-09T13:58:15.524-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:15.524-0400-559eb637ca4787b9985d1cdd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464695524), what: "dropCollection", ns: "db33.coll33", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.578-0400 m30999| 2015-07-09T13:58:15.578-0400 I SHARDING [conn1] distributed lock 'db33.coll33/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.635-0400 m30999| 2015-07-09T13:58:15.634-0400 I COMMAND [conn1] DROP DATABASE: db33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.635-0400 m30999| 2015-07-09T13:58:15.634-0400 I SHARDING [conn1] DBConfig::dropDatabase: db33 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.635-0400 m30999| 2015-07-09T13:58:15.634-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:15.634-0400-559eb637ca4787b9985d1cde", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464695634), what: "dropDatabase.start", ns: "db33", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.742-0400 m30999| 2015-07-09T13:58:15.741-0400 I SHARDING [conn1] DBConfig::dropDatabase: db33 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.742-0400 m31100| 2015-07-09T13:58:15.742-0400 I COMMAND [conn28] dropDatabase db33 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.742-0400 m31100| 2015-07-09T13:58:15.742-0400 I COMMAND [conn28] dropDatabase db33 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.743-0400 m30999| 2015-07-09T13:58:15.743-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:15.743-0400-559eb637ca4787b9985d1cdf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464695743), what: "dropDatabase", ns: "db33", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.743-0400 m31102| 2015-07-09T13:58:15.743-0400 I COMMAND [repl writer worker 0] dropDatabase db33 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.744-0400 m31102| 2015-07-09T13:58:15.743-0400 I COMMAND [repl writer worker 0] dropDatabase db33 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.744-0400 m31101| 2015-07-09T13:58:15.743-0400 I COMMAND [repl writer worker 14] dropDatabase db33 starting [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.744-0400 m31101| 2015-07-09T13:58:15.743-0400 I COMMAND [repl writer worker 14] dropDatabase db33 finished [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.828-0400 m31100| 2015-07-09T13:58:15.827-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.831-0400 m31101| 2015-07-09T13:58:15.831-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T13:58:15.831-0400 m31102| 2015-07-09T13:58:15.831-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.866-0400 m31200| 2015-07-09T13:58:15.865-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.869-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.869-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.869-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.870-0400 jstests/concurrency/fsm_workloads/update_where.js [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.870-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.870-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.870-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.870-0400 m31201| 2015-07-09T13:58:15.870-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.871-0400 m31202| 2015-07-09T13:58:15.870-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.879-0400 m30999| 2015-07-09T13:58:15.878-0400 I SHARDING [conn1] distributed lock 'db34/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb637ca4787b9985d1ce0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.882-0400 m30999| 2015-07-09T13:58:15.882-0400 I SHARDING [conn1] Placing [db34] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.883-0400 m30999| 2015-07-09T13:58:15.882-0400 I SHARDING [conn1] Enabling sharding for database [db34] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.937-0400 m30999| 2015-07-09T13:58:15.937-0400 I SHARDING [conn1] distributed lock 'db34/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.961-0400 m31100| 2015-07-09T13:58:15.961-0400 I INDEX [conn68] build index on: db34.coll34 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db34.coll34" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.961-0400 m31100| 2015-07-09T13:58:15.961-0400 I INDEX [conn68] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.969-0400 m31100| 2015-07-09T13:58:15.969-0400 I INDEX [conn68] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.970-0400 m30999| 2015-07-09T13:58:15.970-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db34.coll34", key: { tid: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.973-0400 m30999| 2015-07-09T13:58:15.973-0400 I SHARDING [conn1] distributed lock 'db34.coll34/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb637ca4787b9985d1ce1 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.974-0400 m30999| 2015-07-09T13:58:15.973-0400 I SHARDING [conn1] enable sharding on: db34.coll34 with shard key: { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.975-0400 m30999| 2015-07-09T13:58:15.974-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:15.974-0400-559eb637ca4787b9985d1ce2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464695974), what: "shardCollection.start", ns: "db34.coll34", details: { shardKey: { tid: 1.0 }, collection: "db34.coll34", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.978-0400 m31102| 2015-07-09T13:58:15.977-0400 I INDEX [repl writer worker 8] build index on: db34.coll34 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db34.coll34" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.978-0400 m31102| 2015-07-09T13:58:15.977-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.985-0400 m31101| 2015-07-09T13:58:15.984-0400 I INDEX [repl writer worker 11] build index on: db34.coll34 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db34.coll34" } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.985-0400 m31101| 2015-07-09T13:58:15.984-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.985-0400 m31102| 2015-07-09T13:58:15.985-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:15.991-0400 m31101| 2015-07-09T13:58:15.991-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.028-0400 m30999| 2015-07-09T13:58:16.028-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db34.coll34 using new epoch 559eb638ca4787b9985d1ce3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.082-0400 m30999| 2015-07-09T13:58:16.082-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db34.coll34: 0ms sequenceNumber: 155 version: 1|0||559eb638ca4787b9985d1ce3 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.137-0400 m30999| 2015-07-09T13:58:16.137-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db34.coll34: 0ms sequenceNumber: 156 version: 1|0||559eb638ca4787b9985d1ce3 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.139-0400 m31100| 2015-07-09T13:58:16.139-0400 I SHARDING [conn57] remotely refreshing metadata for db34.coll34 with requested shard version 1|0||559eb638ca4787b9985d1ce3, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.141-0400 m31100| 2015-07-09T13:58:16.141-0400 I SHARDING [conn57] collection db34.coll34 was previously unsharded, new metadata loaded with shard version 1|0||559eb638ca4787b9985d1ce3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.141-0400 m31100| 2015-07-09T13:58:16.141-0400 I SHARDING [conn57] collection version was loaded at version 1|0||559eb638ca4787b9985d1ce3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.141-0400 m30999| 2015-07-09T13:58:16.141-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.141-0400-559eb638ca4787b9985d1ce4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436464696141), what: "shardCollection", ns: "db34.coll34", details: { version: "1|0||559eb638ca4787b9985d1ce3" } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.194-0400 m30999| 2015-07-09T13:58:16.194-0400 I SHARDING [conn1] distributed lock 'db34.coll34/bs-osx108-8:30999:1436464534:16807' unlocked. 
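(Annotation: the sequence above — placing db34 on test-rs0, building the tid_1 index on the primary and both secondaries, then running shardcollection with key { tid: 1.0 } — is the standard per-workload setup. A minimal sketch of the equivalent shell commands, assuming a connection to one of the mongos routers (30999/30998); names are copied from the log.

    sh.enableSharding("db34");
    db.getSiblingDB("db34").coll34.createIndex({ tid: 1 });   // shard-key index, built first
    sh.shardCollection("db34.coll34", { tid: 1 });
)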
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.195-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.263-0400 m30999| 2015-07-09T13:58:16.263-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63340 #217 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.314-0400 m30999| 2015-07-09T13:58:16.314-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63341 #218 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.314-0400 m30998| 2015-07-09T13:58:16.314-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63342 #217 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.316-0400 m30998| 2015-07-09T13:58:16.316-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63344 #218 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.317-0400 m30999| 2015-07-09T13:58:16.317-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63343 #219 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.317-0400 m30998| 2015-07-09T13:58:16.317-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63346 #219 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.321-0400 m30999| 2015-07-09T13:58:16.321-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63345 #220 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.322-0400 m30998| 2015-07-09T13:58:16.321-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63348 #220 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.327-0400 m30999| 2015-07-09T13:58:16.326-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63347 #221 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.333-0400 m30998| 2015-07-09T13:58:16.332-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63349 #221 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.340-0400 setting random seed: 8599152932874 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.340-0400 setting random seed: 8727232646197 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.340-0400 setting random seed: 5421474813483 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.340-0400 setting random seed: 9351194091141 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.345-0400 setting random seed: 5963410125114 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.347-0400 setting random seed: 572481304407 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.350-0400 setting random seed: 5519162993878 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.352-0400 setting random seed: 1693287650123 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.357-0400 m30998| 2015-07-09T13:58:16.357-0400 I SHARDING [conn217] ChunkManager: time to load chunks for db34.coll34: 0ms sequenceNumber: 39 version: 1|0||559eb638ca4787b9985d1ce3 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.358-0400 setting random seed: 765118771232 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.358-0400 setting random seed: 7034019459970 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.445-0400 m31100| 2015-07-09T13:58:16.445-0400 I SHARDING [conn37] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : 
MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.447-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.447-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.447-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.448-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.448-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.448-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.448-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.448-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.448-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.448-0400 m31100| 2015-07-09T13:58:16.447-0400 W SHARDING [conn37] possible low cardinality key detected in db34.coll34 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.449-0400 m31100| 2015-07-09T13:58:16.448-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.449-0400 m31100| 2015-07-09T13:58:16.449-0400 I SHARDING [conn40] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.457-0400 m31100| 2015-07-09T13:58:16.457-0400 I SHARDING [conn38] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.458-0400 m31100| 2015-07-09T13:58:16.458-0400 I SHARDING [conn39] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.458-0400 m31100| 2015-07-09T13:58:16.458-0400 I SHARDING [conn35] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.459-0400 m31100| 2015-07-09T13:58:16.458-0400 I SHARDING [conn37] distributed lock 'db34.coll34/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb638792e00bb67274985 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.459-0400 m31100| 2015-07-09T13:58:16.459-0400 I SHARDING [conn37] remotely refreshing metadata for db34.coll34 based on current shard version 1|0||559eb638ca4787b9985d1ce3, current metadata version is 1|0||559eb638ca4787b9985d1ce3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.459-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.459-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.460-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.460-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.460-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.460-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.460-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.460-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.460-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.461-0400 m31100| 2015-07-09T13:58:16.459-0400 W SHARDING [conn39] possible low cardinality key detected in db34.coll34 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.462-0400 m31100| 2015-07-09T13:58:16.459-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.463-0400 m31100| 2015-07-09T13:58:16.461-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.463-0400 m31100| 2015-07-09T13:58:16.462-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { 
tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.464-0400 m31100| 2015-07-09T13:58:16.464-0400 W SHARDING [conn38] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.465-0400 m30999| 2015-07-09T13:58:16.464-0400 W SHARDING [conn220] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.466-0400 m31100| 2015-07-09T13:58:16.466-0400 W SHARDING [conn35] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.467-0400 m30998| 2015-07-09T13:58:16.466-0400 W SHARDING [conn217] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.472-0400 m31100| 2015-07-09T13:58:16.471-0400 W SHARDING [conn40] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken. 
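(Annotation: the splitChunk churn above is expected contention, not an error in the test. All ten workload threads insert with distinct tid values, each mongos independently computes split points at tid 0-9 — hence the "possible low cardinality key" warnings, since a key with only ten distinct values can be split no finer than one chunk per value — and only one splitChunk request can hold the collection's split lock at a time. The losers fail with code 125 and the router retries later. A sketch of requesting the same splits by hand, under the assumption that sh.splitAt through a mongos is equivalent; concurrent callers racing on the same chunk would hit the same code-125 failure.

    // Assumed-equivalent manual splits; code 125 ("could not acquire
    // collection lock ... is taken") is retryable lock contention.
    for (var tid = 0; tid <= 9; tid++) {
        sh.splitAt("db34.coll34", { tid: tid });
    }
)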
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.472-0400 m30999| 2015-07-09T13:58:16.472-0400 W SHARDING [conn217] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.483-0400 m31100| 2015-07-09T13:58:16.475-0400 I SHARDING [conn37] metadata of collection db34.coll34 already up to date (shard version : 1|0||559eb638ca4787b9985d1ce3, took 2ms) [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.483-0400 m31100| 2015-07-09T13:58:16.475-0400 I SHARDING [conn37] splitChunk accepted at version 1|0||559eb638ca4787b9985d1ce3 [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.492-0400 m31100| 2015-07-09T13:58:16.486-0400 I COMMAND [conn31] command db34.$cmd command: insert { insert: "coll34", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 5551 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.492-0400 m31100| 2015-07-09T13:58:16.486-0400 I SHARDING [conn35] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.493-0400 m31100| 2015-07-09T13:58:16.487-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.494-0400 m31100| 2015-07-09T13:58:16.487-0400 I COMMAND [conn70] command db34.$cmd command: insert { insert: "coll34", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 5416 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.494-0400 m31100| 2015-07-09T13:58:16.488-0400 I SHARDING [conn40] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.494-0400 m31100| 2015-07-09T13:58:16.488-0400 W 
SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.494-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.494-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.495-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.495-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.495-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.495-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.496-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.496-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.496-0400 m31100| 2015-07-09T13:58:16.488-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.497-0400 m31100| 2015-07-09T13:58:16.497-0400 W SHARDING [conn39] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken. 
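(Annotation: interleaved with the split attempts are the 100-document insert commands — insert { insert: "coll34", documents: 100, ordered: false } — whose Collection acquireWaitCount and timeAcquiringMicros figures suggest the writers briefly queue behind the split-point lookups. A minimal sketch of one such unordered batch from the shell; tid values 0-9 mirror the workload's thread ids, and the field x is illustrative.

    // One unordered 100-document batch, as in the logged insert commands.
    var bulk = db.getSiblingDB("db34").coll34.initializeUnorderedBulkOp();
    for (var i = 0; i < 100; i++) {
        bulk.insert({ tid: i % 10, x: i });
    }
    bulk.execute();
)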
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.497-0400 m30998| 2015-07-09T13:58:16.497-0400 W SHARDING [conn220] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.503-0400 m31100| 2015-07-09T13:58:16.499-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.503-0400 m31100| 2015-07-09T13:58:16.500-0400 W SHARDING [conn35] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.504-0400 m30998| 2015-07-09T13:58:16.501-0400 W SHARDING [conn221] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.574-0400 m31100| 2015-07-09T13:58:16.573-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.573-0400-559eb638792e00bb67274986", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696573), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 10, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.574-0400 m31100| 2015-07-09T13:58:16.573-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.574-0400 m31100| 2015-07-09T13:58:16.573-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.575-0400 m31100| 2015-07-09T13:58:16.573-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.575-0400 m31100| 2015-07-09T13:58:16.573-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - 
key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.575-0400 m31100| 2015-07-09T13:58:16.573-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.576-0400 m31100| 2015-07-09T13:58:16.574-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.576-0400 m31100| 2015-07-09T13:58:16.574-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.576-0400 m31100| 2015-07-09T13:58:16.574-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.576-0400 m31100| 2015-07-09T13:58:16.574-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.576-0400 m31100| 2015-07-09T13:58:16.574-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.576-0400 m31100| 2015-07-09T13:58:16.574-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } [js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.577-0400 m31100| 2015-07-09T13:58:16.575-0400 W SHARDING [conn40] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.581-0400 m30999| 2015-07-09T13:58:16.580-0400 W SHARDING [conn218] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.591-0400 m31100| 2015-07-09T13:58:16.590-0400 I COMMAND [conn25] command db34.$cmd command: insert { insert: "coll34", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 110, w: 110 } }, Database: { acquireCount: { w: 110 } }, Collection: { acquireCount: { w: 10 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 64453 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 223ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.591-0400 m31100| 2015-07-09T13:58:16.591-0400 I SHARDING [conn35] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.593-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.594-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.594-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.594-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.594-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.595-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.595-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.596-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.596-0400 m31100| 2015-07-09T13:58:16.592-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.596-0400 m31100| 2015-07-09T13:58:16.593-0400 W SHARDING [conn35] possible low cardinality key detected in db34.coll34 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.597-0400 m31100| 2015-07-09T13:58:16.594-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.597-0400 m31100| 2015-07-09T13:58:16.596-0400 W SHARDING [conn35] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.598-0400 m30998| 2015-07-09T13:58:16.596-0400 W SHARDING [conn219] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.601-0400 m30998| 2015-07-09T13:58:16.600-0400 I SHARDING [conn219] ChunkManager: time to load chunks for db34.coll34: 0ms sequenceNumber: 40 version: 1|10||559eb638ca4787b9985d1ce3 based on: 1|0||559eb638ca4787b9985d1ce3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.624-0400 m31100| 2015-07-09T13:58:16.624-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.624-0400-559eb638792e00bb67274987", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696624), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 10, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.632-0400 m31100| 2015-07-09T13:58:16.632-0400 I COMMAND [conn145] command db34.$cmd command: insert { insert: "coll34", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 75181 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 167ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.633-0400 m31100| 2015-07-09T13:58:16.632-0400 I SHARDING [conn40] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.634-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.634-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.634-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.635-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.635-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.635-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.635-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.635-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.635-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.636-0400 m31100| 2015-07-09T13:58:16.634-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.636-0400 m31100| 2015-07-09T13:58:16.635-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.638-0400 m31100| 2015-07-09T13:58:16.637-0400 W SHARDING [conn40] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.638-0400 m30999| 2015-07-09T13:58:16.638-0400 W SHARDING [conn219] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.646-0400 m31100| 2015-07-09T13:58:16.646-0400 I SHARDING [conn40] request split points lookup for chunk db34.coll34 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.649-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.649-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.649-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.649-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.649-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.649-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.650-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.650-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.650-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.650-0400 m31100| 2015-07-09T13:58:16.648-0400 W SHARDING [conn40] possible low cardinality key detected in db34.coll34 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.650-0400 m31100| 2015-07-09T13:58:16.649-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.651-0400 m31100| 2015-07-09T13:58:16.651-0400 W SHARDING [conn40] could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db34.coll34 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.651-0400 m30999| 2015-07-09T13:58:16.651-0400 W SHARDING [conn218] splitChunk failed - cmd: { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db34.coll34 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.678-0400 m31100| 2015-07-09T13:58:16.677-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.677-0400-559eb638792e00bb67274988", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696677), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 10, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.731-0400 m31100| 2015-07-09T13:58:16.730-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.730-0400-559eb638792e00bb67274989", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696730), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 10, chunk: { min: { tid: 3.0 }, max: { tid: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.783-0400 m31100| 2015-07-09T13:58:16.782-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.782-0400-559eb638792e00bb6727498a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696782), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 10, chunk: { min: { tid: 4.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.834-0400 m31100| 2015-07-09T13:58:16.834-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.834-0400-559eb638792e00bb6727498b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696834), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 10, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.886-0400 m31100| 2015-07-09T13:58:16.886-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.886-0400-559eb638792e00bb6727498c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696886), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 10, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:16.939-0400 m31100| 2015-07-09T13:58:16.938-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:16.938-0400-559eb638792e00bb6727498d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464696938), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 10, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:17.001-0400 m31100| 2015-07-09T13:58:17.000-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:17.000-0400-559eb639792e00bb6727498e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464697000), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 10, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:17.022-0400 m31100| 2015-07-09T13:58:17.022-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T13:58:17.022-0400-559eb639792e00bb6727498f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436464697022), what: "multi-split", ns: "db34.coll34", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 10, of: 10, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eb638ca4787b9985d1ce3') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:17.075-0400 m31100| 2015-07-09T13:58:17.075-0400 I SHARDING [conn37] distributed lock 'db34.coll34/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:17.076-0400 m31100| 2015-07-09T13:58:17.075-0400 I COMMAND [conn37] command db34.coll34 command: splitChunk { splitChunk: "db34.coll34", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb638ca4787b9985d1ce3') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 98243 } } } protocol:op_command 627ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:17.077-0400 m30999| 2015-07-09T13:58:17.076-0400 I SHARDING [conn221] ChunkManager: time to load chunks for db34.coll34: 0ms sequenceNumber: 157 version: 1|10||559eb638ca4787b9985d1ce3 based on: 1|0||559eb638ca4787b9985d1ce3
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:17.077-0400 m30999| 2015-07-09T13:58:17.077-0400 I SHARDING [conn221] autosplitted db34.coll34 shard: ns: db34.coll34, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 10 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:33.788-0400 m31100| 2015-07-09T13:58:33.787-0400 I QUERY [conn50] query db34.coll34 query: { $where: "this.tid === 3" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1200 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:639 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 1284 } }, Database: { acquireCount: { r: 642 } }, Collection: { acquireCount: { r: 642 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 49536 } } } 17311ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.483-0400 m30999| 2015-07-09T13:58:34.483-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:34.481-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.633-0400 m31100| 2015-07-09T13:58:34.632-0400 I QUERY [conn57] query db34.coll34 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:2350570005477 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1023 keyUpdates:0 writeConflicts:0 numYields:666 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1338 } }, Database: { acquireCount: { r: 669 } }, Collection: { acquireCount: { r: 669 } } } 17960ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.722-0400 m31100| 2015-07-09T13:58:34.721-0400 I WRITE [conn16] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } update: { $set: { x: 8.0 } } nscanned:0 nscannedObjects:1200 nMatched:10 nModified:9 keyUpdates:0 writeConflicts:0 numYields:676 locks:{ Global: { acquireCount: { r: 690, w: 686 } }, Database: { acquireCount: { r: 2, w: 686 } }, Collection: { acquireCount: { r: 2, w: 677 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 14811 } }, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } 18241ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.723-0400 m31100| 2015-07-09T13:58:34.722-0400 I COMMAND [conn16] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, u: { $set: { x: 8.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 690, w: 686 } }, Database: { acquireCount: { r: 2, w: 686 } }, Collection: { acquireCount: { r: 2, w: 677 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 14811 } }, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } protocol:op_command 18242ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.801-0400 m31100| 2015-07-09T13:58:34.800-0400 I WRITE [conn29] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } update: { $set: { x: 8.0 } } nscanned:0 nscannedObjects:1200 nMatched:9 nModified:7 keyUpdates:0 writeConflicts:0 numYields:679 locks:{ Global: { acquireCount: { r: 691, w: 687 } }, Database: { acquireCount: { r: 2, w: 687 } }, Collection: { acquireCount: { r: 2, w: 680 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 13644 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } 18327ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.802-0400 m31100| 2015-07-09T13:58:34.800-0400 I COMMAND [conn29] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, u: { $set: { x: 8.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 691, w: 687 } }, Database: { acquireCount: { r: 2, w: 687 } }, Collection: { acquireCount: { r: 2, w: 680 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 13644 } }, Metadata: { acquireCount: { w: 7 } }, oplog: { acquireCount: { w: 7 } } } protocol:op_command 18328ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.863-0400 m31100| 2015-07-09T13:58:34.863-0400 I WRITE [conn146] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:1200 nMatched:6 nModified:6 keyUpdates:0 writeConflicts:0 numYields:679 locks:{ Global: { acquireCount: { r: 690, w: 686 } }, Database: { acquireCount: { r: 2, w: 686 } }, Collection: { acquireCount: { r: 2, w: 680 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 22439 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } 18393ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:34.864-0400 m31100| 2015-07-09T13:58:34.863-0400 I COMMAND [conn146] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 690, w: 686 } }, Database: { acquireCount: { r: 2, w: 686 } }, Collection: { acquireCount: { r: 2, w: 680 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 22439 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } protocol:op_command 18393ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:35.898-0400 m30998| 2015-07-09T13:58:35.898-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:35.896-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:36.802-0400 m31100| 2015-07-09T13:58:36.802-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:36.800-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:37.086-0400 m31100| 2015-07-09T13:58:37.085-0400 I WRITE [conn31] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } update: { $set: { x: 8.0 } } nscanned:0 nscannedObjects:1200 nMatched:10 nModified:9 keyUpdates:0 writeConflicts:0 numYields:758 locks:{ Global: { acquireCount: { r: 772, w: 768 } }, Database: { acquireCount: { r: 2, w: 768 } }, Collection: { acquireCount: { r: 2, w: 759 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 42456 } }, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } 20581ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:37.087-0400 m31100| 2015-07-09T13:58:37.085-0400 I COMMAND [conn31] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, u: { $set: { x: 8.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 772, w: 768 } }, Database: { acquireCount: { r: 2, w: 768 } }, Collection: { acquireCount: { r: 2, w: 759 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 42456 } }, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } protocol:op_command 20581ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:37.228-0400 m31200| 2015-07-09T13:58:37.228-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:58:37.220-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:40.635-0400 m31100| 2015-07-09T13:58:40.634-0400 I WRITE [conn67] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } update: { $set: { x: 5.0 } } nscanned:0 nscannedObjects:1400 nMatched:12 nModified:11 keyUpdates:0 writeConflicts:0 numYields:884 locks:{ Global: { acquireCount: { r: 900, w: 896 } }, Database: { acquireCount: { r: 2, w: 896 } }, Collection: { acquireCount: { r: 2, w: 885 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 28710 } }, Metadata: { acquireCount: { w: 11 } }, oplog: { acquireCount: { w: 11 } } } 24111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:40.636-0400 m31100| 2015-07-09T13:58:40.635-0400 I COMMAND [conn67] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, u: { $set: { x: 5.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 900, w: 896 } }, Database: { acquireCount: { r: 2, w: 896 } }, Collection: { acquireCount: { r: 2, w: 885 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 28710 } }, Metadata: { acquireCount: { w: 11 } }, oplog: { acquireCount: { w: 11 } } } protocol:op_command 24111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:40.785-0400 m31100| 2015-07-09T13:58:40.785-0400 I QUERY [conn46] query db34.coll34 query: { $where: "this.tid === 7" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1400 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:890 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 1786 } }, Database: { acquireCount: { r: 893 } }, Collection: { acquireCount: { r: 893 } } } 24162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:41.248-0400 m31100| 2015-07-09T13:58:41.247-0400 I QUERY [conn135] getmore db34.coll34 query: { $where: "this.tid === 0" } cursorid:2350570005477 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:239 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 480 } }, Database: { acquireCount: { r: 240 } }, Collection: { acquireCount: { r: 240 } } } 6612ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:41.263-0400 m31100| 2015-07-09T13:58:41.262-0400 I WRITE [conn145] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:1400 nMatched:18 nModified:17 keyUpdates:0 writeConflicts:0 numYields:911 locks:{ Global: { acquireCount: { r: 933, w: 929 } }, Database: { acquireCount: { r: 2, w: 929 } }, Collection: { acquireCount: { r: 2, w: 912 } }, Metadata: { acquireCount: { w: 17 } }, oplog: { acquireCount: { w: 17 } } } 24622ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:41.263-0400 m31100| 2015-07-09T13:58:41.262-0400 I COMMAND [conn145] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 933, w: 929 } }, Database: { acquireCount: { r: 2, w: 929 } }, Collection: { acquireCount: { r: 2, w: 912 } }, Metadata: { acquireCount: { w: 17 } }, oplog: { acquireCount: { w: 17 } } } protocol:op_command 24622ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:43.586-0400 m31100| 2015-07-09T13:58:43.586-0400 I WRITE [conn70] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } update: { $set: { x: 6.0 } } nscanned:0 nscannedObjects:1500 nMatched:4 nModified:4 keyUpdates:0 writeConflicts:0 numYields:972 locks:{ Global: { acquireCount: { r: 981, w: 977 } }, Database: { acquireCount: { r: 2, w: 977 } }, Collection: { acquireCount: { r: 2, w: 973 } }, Metadata: { acquireCount: { w: 4 } }, oplog: { acquireCount: { w: 4 } } } 26505ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:43.587-0400 m31100| 2015-07-09T13:58:43.586-0400 I COMMAND [conn70] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, u: { $set: { x: 6.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 981, w: 977 } }, Database: { acquireCount: { r: 2, w: 977 } }, Collection: { acquireCount: { r: 2, w: 973 } }, Metadata: { acquireCount: { w: 4 } }, oplog: { acquireCount: { w: 4 } } } protocol:op_command 26505ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:58.911-0400 m31100| 2015-07-09T13:58:58.911-0400 I QUERY [conn52] query db34.coll34 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:2349439299905 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:984 keyUpdates:0 writeConflicts:0 numYields:650 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1302 } }, Database: { acquireCount: { r: 651 } }, Collection: { acquireCount: { r: 651 } } } 17643ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:58:59.608-0400 m31100| 2015-07-09T13:58:59.607-0400 I QUERY [conn60] query db34.coll34 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:2351430286195 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1023 keyUpdates:0 writeConflicts:0 numYields:675 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1352 } }, Database: { acquireCount: { r: 676 } }, Collection: { acquireCount: { r: 676 } } } 18326ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:03.931-0400 m31100| 2015-07-09T13:59:03.931-0400 I QUERY [conn50] query db34.coll34 query: { $where: "this.tid === 3" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1700 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1109 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 2220 } }, Database: { acquireCount: { r: 1110 } }, Collection: { acquireCount: { r: 1110 } } } 30135ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:04.485-0400 m30999| 2015-07-09T13:59:04.485-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:04.483-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:04.875-0400 m31100| 2015-07-09T13:59:04.874-0400 I WRITE [conn16] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } update: { $set: { x: 4.0 } } nscanned:0 nscannedObjects:1700 nMatched:11 nModified:11 keyUpdates:0 writeConflicts:0 numYields:1105 locks:{ Global: { acquireCount: { r: 1117, w: 1117 } }, Database: { acquireCount: { w: 1117 } }, Collection: { acquireCount: { w: 1106 } }, Metadata: { acquireCount: { w: 11 } }, oplog: { acquireCount: { w: 11 } } } 30149ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:04.876-0400 m31100| 2015-07-09T13:59:04.875-0400 I COMMAND [conn16] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, u: { $set: { x: 4.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1117, w: 1117 } }, Database: { acquireCount: { w: 1117 } }, Collection: { acquireCount: { w: 1106 } }, Metadata: { acquireCount: { w: 11 } }, oplog: { acquireCount: { w: 11 } } } protocol:op_command 30150ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:05.900-0400 m30998| 2015-07-09T13:59:05.900-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:05.898-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:06.607-0400 m31100| 2015-07-09T13:59:06.606-0400 I WRITE [conn29] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } update: { $set: { x: 1.0 } } nscanned:0 nscannedObjects:1800 nMatched:7 nModified:6 keyUpdates:0 writeConflicts:0 numYields:1169 locks:{ Global: { acquireCount: { r: 1176, w: 1176 } }, Database: { acquireCount: { w: 1176 } }, Collection: { acquireCount: { w: 1170 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } 31802ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:06.607-0400 m31100| 2015-07-09T13:59:06.606-0400 I COMMAND [conn29] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, u: { $set: { x: 1.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1176, w: 1176 } }, Database: { acquireCount: { w: 1176 } }, Collection: { acquireCount: { w: 1170 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } protocol:op_command 31802ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:06.631-0400 m31100| 2015-07-09T13:59:06.631-0400 I QUERY [conn48] query db34.coll34 query: { $where: "this.tid === 1" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1800 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1164 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 2330 } }, Database: { acquireCount: { r: 1165 } }, Collection: { acquireCount: { r: 1165 } } } 31752ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:06.804-0400 m31100| 2015-07-09T13:59:06.803-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:06.801-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:07.229-0400 m31200| 2015-07-09T13:59:07.229-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:07.227-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:12.300-0400 m31100| 2015-07-09T13:59:12.299-0400 I WRITE [conn31] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } update: { $set: { x: 4.0 } } nscanned:0 nscannedObjects:2000 nMatched:22 nModified:20 keyUpdates:0 writeConflicts:0 numYields:1279 locks:{ Global: { acquireCount: { r: 1300, w: 1300 } }, Database: { acquireCount: { w: 1300 } }, Collection: { acquireCount: { w: 1280 } }, Metadata: { acquireCount: { w: 20 } }, oplog: { acquireCount: { w: 20 } } } 35150ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:12.300-0400 m31100| 2015-07-09T13:59:12.299-0400 I COMMAND [conn31] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, u: { $set: { x: 4.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1300, w: 1300 } }, Database: { acquireCount: { w: 1300 } }, Collection: { acquireCount: { w: 1280 } }, Metadata: { acquireCount: { w: 20 } }, oplog: { acquireCount: { w: 20 } } } protocol:op_command 35151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:16.958-0400 m31100| 2015-07-09T13:59:16.957-0400 I WRITE [conn67] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } update: { $set: { x: 5.0 } } nscanned:0 nscannedObjects:2100 nMatched:10 nModified:8 keyUpdates:0 writeConflicts:0 numYields:1320 locks:{ Global: { acquireCount: { r: 1329, w: 1329 } }, Database: { acquireCount: { w: 1329 } }, Collection: { acquireCount: { w: 1321 } }, Metadata: { acquireCount: { w: 8 } }, oplog: { acquireCount: { w: 8 } } } 36319ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:16.958-0400 m31100| 2015-07-09T13:59:16.957-0400 I COMMAND [conn67] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, u: { $set: { x: 5.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1329, w: 1329 } }, Database: { acquireCount: { w: 1329 } }, Collection: { acquireCount: { w: 1321 } }, Metadata: { acquireCount: { w: 8 } }, oplog: { acquireCount: { w: 8 } } } protocol:op_command 36320ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:18.909-0400 m31100| 2015-07-09T13:59:18.908-0400 I WRITE [conn26] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } update: { $set: { x: 8.0 } } nscanned:0 nscannedObjects:2200 nMatched:10 nModified:10 keyUpdates:0 writeConflicts:0 numYields:1390 locks:{ Global: { acquireCount: { r: 1401, w: 1401 } }, Database: { acquireCount: { w: 1401 } }, Collection: { acquireCount: { w: 1391 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } 38120ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:19.066-0400 m31100| 2015-07-09T13:59:18.909-0400 I COMMAND [conn26] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, u: { $set: { x: 8.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1401, w: 1401 } }, Database: { acquireCount: { w: 1401 } }, Collection: { acquireCount: { w: 1391 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } protocol:op_command 38120ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:19.406-0400 m31100| 2015-07-09T13:59:19.405-0400 I QUERY [conn135] getmore db34.coll34 query: { $where: "this.tid === 6" } cursorid:2349439299905 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:731 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 1464 } }, Database: { acquireCount: { r: 732 } }, Collection: { acquireCount: { r: 732 } } } 20492ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:19.412-0400 m31100| 2015-07-09T13:59:19.412-0400 I QUERY [conn43] getmore db34.coll34 query: { $where: "this.tid === 0" } cursorid:2351430286195 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:709 nreturned:199 reslen:9174 locks:{ Global: { acquireCount: { r: 1420 } }, Database: { acquireCount: { r: 710 } }, Collection: { acquireCount: { r: 710 } } } 19801ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:23.527-0400 m31100| 2015-07-09T13:59:23.526-0400 I WRITE [conn70] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } update: { $set: { x: 8.0 } } nscanned:0 nscannedObjects:2300 nMatched:27 nModified:24 keyUpdates:0 writeConflicts:0 numYields:1450 locks:{ Global: { acquireCount: { r: 1475, w: 1475 } }, Database: { acquireCount: { w: 1475 } }, Collection: { acquireCount: { w: 1451 } }, Metadata: { acquireCount: { w: 24 } }, oplog: { acquireCount: { w: 24 } } } 39883ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:23.527-0400 m31100| 2015-07-09T13:59:23.526-0400 I COMMAND [conn70] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, u: { $set: { x: 8.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1475, w: 1475 } }, Database: { acquireCount: { w: 1475 } }, Collection: { acquireCount: { w: 1451 } }, Metadata: { acquireCount: { w: 24 } }, oplog: { acquireCount: { w: 24 } } } protocol:op_command 39884ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:34.489-0400 m30999| 2015-07-09T13:59:34.488-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:34.485-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:35.901-0400 m30998| 2015-07-09T13:59:35.901-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:35.899-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:36.806-0400 m31100| 2015-07-09T13:59:36.805-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:36.803-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:37.166-0400 m31100| 2015-07-09T13:59:37.165-0400 I QUERY [conn60] query db34.coll34 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:2350695829761 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1801 keyUpdates:0 writeConflicts:0 numYields:1092 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2186 } }, Database: { acquireCount: { r: 1093 } }, Collection: { acquireCount: { r: 1093 } } } 30530ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:37.232-0400 m31200| 2015-07-09T13:59:37.231-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T13:59:37.229-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:39.089-0400 m31100| 2015-07-09T13:59:39.089-0400 I QUERY [conn48] query db34.coll34 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:2349477719075 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1901 keyUpdates:0 writeConflicts:0 numYields:1164 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2330 } }, Database: { acquireCount: { r: 1165 } }, Collection: { acquireCount: { r: 1165 } } } 32373ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:43.192-0400 m31100| 2015-07-09T13:59:43.191-0400 I WRITE [conn24] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" } update: { $set: { x: 5.0 } } nscanned:0 nscannedObjects:2300 nMatched:10 nModified:10 keyUpdates:0 writeConflicts:0 numYields:1397 locks:{ Global: { acquireCount: { r: 1408, w: 1408 } }, Database: { acquireCount: { w: 1408 } }, Collection: { acquireCount: { w: 1398 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } 39255ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:43.193-0400 m31100| 2015-07-09T13:59:43.192-0400 I COMMAND [conn24] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" }, u: { $set: { x: 5.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1408, w: 1408 } }, Database: { acquireCount: { w: 1408 } }, Collection: { acquireCount: { w: 1398 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } protocol:op_command 39256ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:45.807-0400 m31100| 2015-07-09T13:59:45.806-0400 I WRITE [conn16] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:2400 nMatched:18 nModified:17 keyUpdates:0 writeConflicts:0 numYields:1466 locks:{ Global: { acquireCount: { r: 1484, w: 1484 } }, Database: { acquireCount: { w: 1484 } }, Collection: { acquireCount: { w: 1467 } }, Metadata: { acquireCount: { w: 17 } }, oplog: { acquireCount: { w: 17 } } } 40904ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:45.808-0400 m31100| 2015-07-09T13:59:45.806-0400 I COMMAND [conn16] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1484, w: 1484 } }, Database: { acquireCount: { w: 1484 } }, Collection: { acquireCount: { w: 1467 } }, Metadata: { acquireCount: { w: 17 } }, oplog: { acquireCount: { w: 17 } } } protocol:op_command 40904ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:47.919-0400 m31100| 2015-07-09T13:59:47.919-0400 I QUERY [conn86] getmore db34.coll34 query: { $where: "this.tid === 8" } cursorid:2350695829761 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:396 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 794 } }, Database: { acquireCount: { r: 397 } }, Collection: { acquireCount: { r: 397 } } } 10751ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:49.701-0400 m31100| 2015-07-09T13:59:49.700-0400 I QUERY [conn52] query db34.coll34 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2349494739234 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1501 keyUpdates:0 writeConflicts:0 numYields:949 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1900 } }, Database: { acquireCount: { r: 950 } }, Collection: { acquireCount: { r: 950 } } } 26162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:49.888-0400 m31100| 2015-07-09T13:59:49.788-0400 I QUERY [conn137] getmore db34.coll34 query: { $where: "this.tid === 1" } cursorid:2349477719075 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:388 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 778 } }, Database: { acquireCount: { r: 389 } }, Collection: { acquireCount: { r: 389 } } } 10696ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:53.622-0400 m31100| 2015-07-09T13:59:53.621-0400 I QUERY [conn50] query db34.coll34 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2349902099051 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2101 keyUpdates:0 writeConflicts:0 numYields:1319 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2640 } }, Database: { acquireCount: { r: 1320 } }, Collection: { acquireCount: { r: 1320 } } } 36638ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:59.547-0400 m31100| 2015-07-09T13:59:59.546-0400 I WRITE [conn31] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:2700 nMatched:35 nModified:34 keyUpdates:0 writeConflicts:0 numYields:1704 locks:{ Global: { acquireCount: { r: 1739, w: 1739 } }, Database: { acquireCount: { w: 1739 } }, Collection: { acquireCount: { w: 1705 } }, Metadata: { acquireCount: { w: 34 } }, oplog: { acquireCount: { w: 34 } } } 47218ms
[js_test:fsm_all_sharded_replication] 2015-07-09T13:59:59.548-0400 m31100| 2015-07-09T13:59:59.546-0400 I COMMAND [conn31] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1739, w: 1739 } }, Database: { acquireCount: { w: 1739 } }, Collection: { acquireCount: { w: 1705 } }, Metadata: { acquireCount: { w: 34 } }, oplog: { acquireCount: { w: 34 } } } protocol:op_command 47218ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:04.490-0400 m31100| 2015-07-09T14:00:04.489-0400 I QUERY [conn137] getmore db34.coll34 query: { $where: "this.tid === 9" } cursorid:2349902099051 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:403 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 808 } }, Database: { acquireCount: { r: 404 } }, Collection: { acquireCount: { r: 404 } } } 10866ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:04.490-0400 m30999| 2015-07-09T14:00:04.490-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:04.488-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:05.903-0400 m30998| 2015-07-09T14:00:05.903-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:05.901-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:06.808-0400 m31100| 2015-07-09T14:00:06.808-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:06.805-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:07.233-0400 m31200| 2015-07-09T14:00:07.232-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:07.231-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:10.239-0400 m31100| 2015-07-09T14:00:10.239-0400 I QUERY [conn46] query db34.coll34 query: { $where: "this.tid === 7" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2900 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1877 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 3756 } }, Database: { acquireCount: { r: 1878 } }, Collection: { acquireCount: { r: 1878 } } } 51322ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:10.765-0400 m31100| 2015-07-09T14:00:10.764-0400 I WRITE [conn29] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } update: { $set: { x: 7.0 } } nscanned:0 nscannedObjects:2900 nMatched:25 nModified:22 keyUpdates:0 writeConflicts:0 numYields:1888 locks:{ Global: { acquireCount: { r: 1911, w: 1911 } }, Database: { acquireCount: { w: 1911 } }, Collection: { acquireCount: { w: 1889 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } 51354ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:10.765-0400 m31100| 2015-07-09T14:00:10.764-0400 I COMMAND [conn29] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, u: { $set: { x: 7.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1911, w: 1911 } }, Database: { acquireCount: { w: 1911 } }, Collection: { acquireCount: { w: 1889 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 51354ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:12.709-0400 m31100| 2015-07-09T14:00:12.708-0400 I WRITE [conn23] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" } update: { $set: { x: 0.0 } } nscanned:0 nscannedObjects:3000 nMatched:43 nModified:39 keyUpdates:0 writeConflicts:0 numYields:1950 locks:{ Global: { acquireCount: { r: 1990, w: 1990 } }, Database: { acquireCount: { w: 1990 } }, Collection: { acquireCount: { w: 1951 } }, Metadata: { acquireCount: { w: 39 } }, oplog: { acquireCount: { w: 39 } } } 53271ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:12.710-0400 m31100| 2015-07-09T14:00:12.708-0400 I COMMAND [conn23] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" }, u: { $set: { x: 0.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1990, w: 1990 } }, Database: { acquireCount: { w: 1990 } }, Collection: { acquireCount: { w: 1951 } }, Metadata: { acquireCount: { w: 39 } }, oplog: { acquireCount: { w: 39 } } } protocol:op_command 53271ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:16.748-0400 m31100| 2015-07-09T14:00:16.748-0400 I QUERY [conn60] query db34.coll34 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2351220999313 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1701 keyUpdates:0 writeConflicts:0 numYields:1171 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2344 } }, Database: { acquireCount: { r: 1172 } }, Collection: { acquireCount: { r: 1172 } } } 30933ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:18.986-0400 m31100| 2015-07-09T14:00:18.986-0400 I QUERY [conn86] getmore db34.coll34 query: { $where: "this.tid === 4" } cursorid:2349494739234 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1105 nreturned:199 reslen:9174 locks:{ Global: { acquireCount: { r: 2212 } }, Database: { acquireCount: { r: 1106 } }, Collection: { acquireCount: { r: 1106 } } } 29284ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:21.904-0400 m31100| 2015-07-09T14:00:21.903-0400 I QUERY [conn50] query db34.coll34 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:2350141412528 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1201 keyUpdates:0 writeConflicts:0 numYields:867 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1736 } }, Database: { acquireCount: { r: 868 } }, Collection: { acquireCount: { r: 868 } } } 22348ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:25.966-0400 m31100| 2015-07-09T14:00:25.965-0400 I QUERY [conn48] query db34.coll34 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2350470161864 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2301 keyUpdates:0 writeConflicts:0 numYields:1638 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 3278 } }, Database: { acquireCount: { r: 1639 } }, Collection: { acquireCount: { r: 1639 } } } 42747ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:34.493-0400 m30999| 2015-07-09T14:00:34.492-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:34.490-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:35.906-0400 m30998| 2015-07-09T14:00:35.905-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:35.902-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:36.811-0400 m31100| 2015-07-09T14:00:36.811-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:36.808-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:37.235-0400 m31200| 2015-07-09T14:00:37.235-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:00:37.232-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:44.416-0400 m31100| 2015-07-09T14:00:44.416-0400 I QUERY [conn74] getmore db34.coll34 query: { $where: "this.tid === 3" } cursorid:2350470161864 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:728 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 1458 } }, Database: { acquireCount: { r: 729 } }, Collection: { acquireCount: { r: 729 } } } 18448ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:47.022-0400 m31100| 2015-07-09T14:00:47.022-0400 I QUERY [conn44] getmore db34.coll34 query: { $where: "this.tid === 2" } cursorid:2351220999313 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1177 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 2356 } }, Database: { acquireCount: { r: 1178 } }, Collection: { acquireCount: { r: 1178 } } } 30272ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:49.197-0400 m31100| 2015-07-09T14:00:49.196-0400 I QUERY [conn60] query db34.coll34 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2350420181207 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1501 keyUpdates:0 writeConflicts:0 numYields:1179 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2360 } }, Database: { acquireCount: { r: 1180 } }, Collection: { acquireCount: { r: 1180 } } } 30181ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:51.195-0400 m31100| 2015-07-09T14:00:51.195-0400 I WRITE [conn16] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } update: { $set: { x: 3.0 } } nscanned:0 nscannedObjects:3300 nMatched:39 nModified:34 keyUpdates:0 writeConflicts:0 numYields:2435 locks:{ Global: { acquireCount: { r: 2470, w: 2470 } }, Database: { acquireCount: { w: 2470 } }, Collection: { acquireCount: { w: 2436 } }, Metadata: { acquireCount: { w: 34 } }, oplog: { acquireCount: { w: 34 } } } 63249ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:00:51.196-0400 m31100| 2015-07-09T14:00:51.195-0400 I COMMAND [conn16] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, u: { $set: { x: 3.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3')
], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2470, w: 2470 } }, Database: { acquireCount: { w: 2470 } }, Collection: { acquireCount: { w: 2436 } }, Metadata: { acquireCount: { w: 34 } }, oplog: { acquireCount: { w: 34 } } } protocol:op_command 63249ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:00:55.489-0400 m31100| 2015-07-09T14:00:55.489-0400 I WRITE [conn24] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } update: { $set: { x: 5.0 } } nscanned:0 nscannedObjects:3400 nMatched:46 nModified:38 keyUpdates:0 writeConflicts:0 numYields:2542 locks:{ Global: { acquireCount: { r: 2581, w: 2581 } }, Database: { acquireCount: { w: 2581 } }, Collection: { acquireCount: { w: 2543 } }, Metadata: { acquireCount: { w: 38 } }, oplog: { acquireCount: { w: 38 } } } 65648ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:00:55.490-0400 m31100| 2015-07-09T14:00:55.489-0400 I COMMAND [conn24] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, u: { $set: { x: 5.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2581, w: 2581 } }, Database: { acquireCount: { w: 2581 } }, Collection: { acquireCount: { w: 2543 } }, Metadata: { acquireCount: { w: 38 } }, oplog: { acquireCount: { w: 38 } } } protocol:op_command 65648ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:04.494-0400 m30999| 2015-07-09T14:01:04.494-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:04.492-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:05.908-0400 m30998| 2015-07-09T14:01:05.908-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:05.905-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:06.813-0400 m31100| 2015-07-09T14:01:06.813-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:06.811-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:07.238-0400 m31200| 2015-07-09T14:01:07.237-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:07.235-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:07.608-0400 m31100| 2015-07-09T14:01:07.608-0400 I QUERY [conn137] getmore db34.coll34 query: { $where: "this.tid === 5" } cursorid:2350141412528 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1806 nreturned:299 reslen:13774 locks:{ Global: { acquireCount: { r: 3614 } }, Database: { acquireCount: { r: 1807 } }, Collection: { acquireCount: { r: 1807 } } } 45703ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:01:07.655-0400 m30998| 2015-07-09T14:01:07.654-0400 I NETWORK [conn221] end connection 127.0.0.1:63349 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:16.314-0400 m31100| 2015-07-09T14:01:16.313-0400 I WRITE [conn31] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } update: { $set: { x: 2.0 } } nscanned:0 nscannedObjects:3700 nMatched:49 nModified:47 keyUpdates:0 writeConflicts:0 numYields:2809 locks:{ Global: { acquireCount: { r: 2857, w: 2857 } }, Database: { acquireCount: { w: 2857 } }, Collection: { acquireCount: { w: 2810 } }, Metadata: { acquireCount: { w: 47 } }, oplog: { acquireCount: { w: 47 } } } 71783ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:16.315-0400 m31100| 2015-07-09T14:01:16.314-0400 I COMMAND [conn31] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, u: { $set: { x: 2.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2857, w: 2857 } }, Database: { acquireCount: { w: 2857 } }, Collection: { acquireCount: { w: 2810 } }, Metadata: { acquireCount: { w: 47 } }, oplog: { acquireCount: { w: 47 } } } protocol:op_command 71783ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:19.333-0400 m31100| 2015-07-09T14:01:19.332-0400 I QUERY [conn45] query db34.coll34 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2349976624756 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1701 keyUpdates:0 writeConflicts:0 numYields:1278 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2558 } }, Database: { acquireCount: { r: 1279 } }, Collection: { acquireCount: { r: 1279 } } } 32282ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:22.520-0400 m31100| 2015-07-09T14:01:22.520-0400 I WRITE [conn146] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } update: { $set: { x: 3.0 } } nscanned:0 nscannedObjects:3700 nMatched:8 nModified:8 keyUpdates:0 writeConflicts:0 numYields:2839 locks:{ Global: { acquireCount: { r: 2848, w: 2848 } }, Database: { acquireCount: { w: 2848 } }, Collection: { acquireCount: { w: 2840 } }, Metadata: { acquireCount: { w: 8 } }, oplog: { acquireCount: { w: 8 } } } 72276ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:22.521-0400 m31100| 2015-07-09T14:01:22.520-0400 I COMMAND [conn146] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, u: { $set: { x: 3.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2848, w: 2848 } }, Database: { acquireCount: { w: 2848 } }, Collection: { acquireCount: { w: 2840 } }, Metadata: { acquireCount: { w: 8 } }, oplog: { acquireCount: { w: 8 } } } protocol:op_command 72276ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:23.258-0400 m31100| 2015-07-09T14:01:23.257-0400 I WRITE [conn29] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid 
=== 6" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:3700 nMatched:32 nModified:30 keyUpdates:0 writeConflicts:0 numYields:2845 locks:{ Global: { acquireCount: { r: 2876, w: 2876 } }, Database: { acquireCount: { w: 2876 } }, Collection: { acquireCount: { w: 2846 } }, Metadata: { acquireCount: { w: 30 } }, oplog: { acquireCount: { w: 30 } } } 72466ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:23.259-0400 m31100| 2015-07-09T14:01:23.258-0400 I COMMAND [conn29] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2876, w: 2876 } }, Database: { acquireCount: { w: 2876 } }, Collection: { acquireCount: { w: 2846 } }, Metadata: { acquireCount: { w: 30 } }, oplog: { acquireCount: { w: 30 } } } protocol:op_command 72466ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:25.915-0400 m31100| 2015-07-09T14:01:25.915-0400 I QUERY [conn60] query db34.coll34 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:2350246261728 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1801 keyUpdates:0 writeConflicts:0 numYields:1386 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2774 } }, Database: { acquireCount: { r: 1387 } }, Collection: { acquireCount: { r: 1387 } } } 34690ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:27.285-0400 m31100| 2015-07-09T14:01:27.285-0400 I WRITE [conn23] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" } update: { $set: { x: 6.0 } } nscanned:0 nscannedObjects:3800 nMatched:42 nModified:38 keyUpdates:0 writeConflicts:0 numYields:2946 locks:{ Global: { acquireCount: { r: 2985, w: 2985 } }, Database: { acquireCount: { w: 2985 } }, Collection: { acquireCount: { w: 2947 } }, Metadata: { acquireCount: { w: 38 } }, oplog: { acquireCount: { w: 38 } } } 74546ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:27.286-0400 m31100| 2015-07-09T14:01:27.285-0400 I COMMAND [conn23] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" }, u: { $set: { x: 6.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2985, w: 2985 } }, Database: { acquireCount: { w: 2985 } }, Collection: { acquireCount: { w: 2947 } }, Metadata: { acquireCount: { w: 38 } }, oplog: { acquireCount: { w: 38 } } } protocol:op_command 74547ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:27.314-0400 m30999| 2015-07-09T14:01:27.314-0400 I NETWORK [conn218] end connection 127.0.0.1:63341 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:33.679-0400 m31100| 2015-07-09T14:01:33.678-0400 I QUERY [conn44] getmore db34.coll34 query: { $where: "this.tid === 4" } cursorid:2350420181207 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1773 nreturned:299 reslen:13774 locks:{ Global: { acquireCount: { r: 3548 } }, Database: { acquireCount: { r: 
1774 } }, Collection: { acquireCount: { r: 1774 } } } 44480ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:34.497-0400 m30999| 2015-07-09T14:01:34.496-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:34.494-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:35.911-0400 m30998| 2015-07-09T14:01:35.910-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:35.908-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:36.816-0400 m31100| 2015-07-09T14:01:36.815-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:36.813-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:37.240-0400 m31200| 2015-07-09T14:01:37.240-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:01:37.237-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:53.377-0400 m31100| 2015-07-09T14:01:53.377-0400 I QUERY [conn33] query db34.coll34 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2349540576959 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2101 keyUpdates:0 writeConflicts:0 numYields:1424 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2850 } }, Database: { acquireCount: { r: 1425 } }, Collection: { acquireCount: { r: 1425 } } } 37056ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:55.260-0400 m31100| 2015-07-09T14:01:55.259-0400 I WRITE [conn26] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:3900 nMatched:26 nModified:26 keyUpdates:0 writeConflicts:0 numYields:2740 locks:{ Global: { acquireCount: { r: 2767, w: 2767 } }, Database: { acquireCount: { w: 2767 } }, Collection: { acquireCount: { w: 2741 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } 70840ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:55.260-0400 m31100| 2015-07-09T14:01:55.260-0400 I COMMAND [conn26] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2767, w: 2767 } }, Database: { acquireCount: { w: 2767 } }, Collection: { acquireCount: { w: 2741 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } protocol:op_command 70841ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:01:58.018-0400 m31100| 2015-07-09T14:01:58.017-0400 I QUERY [conn43] getmore db34.coll34 query: { $where: "this.tid === 2" } cursorid:2349976624756 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1471 
nreturned:199 reslen:9174 locks:{ Global: { acquireCount: { r: 2944 } }, Database: { acquireCount: { r: 1472 } }, Collection: { acquireCount: { r: 1472 } } } 38683ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:04.173-0400 m31100| 2015-07-09T14:02:04.172-0400 I QUERY [conn135] getmore db34.coll34 query: { $where: "this.tid === 8" } cursorid:2350246261728 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1453 nreturned:299 reslen:13774 locks:{ Global: { acquireCount: { r: 2908 } }, Database: { acquireCount: { r: 1454 } }, Collection: { acquireCount: { r: 1454 } } } 38255ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:04.201-0400 m30999| 2015-07-09T14:02:04.200-0400 I NETWORK [conn220] end connection 127.0.0.1:63345 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:04.500-0400 m30999| 2015-07-09T14:02:04.499-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:04.496-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:05.913-0400 m30998| 2015-07-09T14:02:05.913-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:05.910-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:06.819-0400 m31100| 2015-07-09T14:02:06.818-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:06.816-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:07.242-0400 m31200| 2015-07-09T14:02:07.242-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:07.240-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:09.673-0400 m31100| 2015-07-09T14:02:09.673-0400 I WRITE [conn24] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:4100 nMatched:43 nModified:39 keyUpdates:0 writeConflicts:0 numYields:2845 locks:{ Global: { acquireCount: { r: 2885, w: 2885 } }, Database: { acquireCount: { w: 2885 } }, Collection: { acquireCount: { w: 2846 } }, Metadata: { acquireCount: { w: 39 } }, oplog: { acquireCount: { w: 39 } } } 74161ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:09.674-0400 m31100| 2015-07-09T14:02:09.673-0400 I COMMAND [conn24] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2885, w: 2885 } }, Database: { acquireCount: { w: 2885 } }, Collection: { acquireCount: { w: 2846 } }, Metadata: { acquireCount: { w: 39 } }, oplog: { acquireCount: { w: 39 } } } protocol:op_command 74161ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:02:09.677-0400 m30998| 2015-07-09T14:02:09.677-0400 I NETWORK [conn217] end connection 127.0.0.1:63342 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:26.131-0400 m31100| 2015-07-09T14:02:26.130-0400 I QUERY [conn52] query db34.coll34 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2351056979041 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1701 keyUpdates:0 writeConflicts:0 numYields:1059 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2120 } }, Database: { acquireCount: { r: 1060 } }, Collection: { acquireCount: { r: 1060 } } } 28085ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:26.719-0400 m31100| 2015-07-09T14:02:26.719-0400 I QUERY [conn136] getmore db34.coll34 query: { $where: "this.tid === 9" } cursorid:2349540576959 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1251 nreturned:299 reslen:13774 locks:{ Global: { acquireCount: { r: 2504 } }, Database: { acquireCount: { r: 1252 } }, Collection: { acquireCount: { r: 1252 } } } 33340ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:31.475-0400 m31100| 2015-07-09T14:02:31.474-0400 I WRITE [conn146] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } update: { $set: { x: 1.0 } } nscanned:0 nscannedObjects:4100 nMatched:10 nModified:8 keyUpdates:0 writeConflicts:0 numYields:2600 locks:{ Global: { acquireCount: { r: 2609, w: 2609 } }, Database: { acquireCount: { w: 2609 } }, Collection: { acquireCount: { w: 2601 } }, Metadata: { acquireCount: { w: 8 } }, oplog: { acquireCount: { w: 8 } } } 68950ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:31.475-0400 m31100| 2015-07-09T14:02:31.474-0400 I COMMAND [conn146] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, u: { $set: { x: 1.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2609, w: 2609 } }, Database: { acquireCount: { w: 2609 } }, Collection: { acquireCount: { w: 2601 } }, Metadata: { acquireCount: { w: 8 } }, oplog: { acquireCount: { w: 8 } } } protocol:op_command 68951ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:32.823-0400 m31100| 2015-07-09T14:02:32.823-0400 I QUERY [conn33] query db34.coll34 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2351168864406 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2301 keyUpdates:0 writeConflicts:0 numYields:1417 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2836 } }, Database: { acquireCount: { r: 1418 } }, Collection: { acquireCount: { r: 1418 } } } 37557ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:33.670-0400 m31100| 2015-07-09T14:02:33.669-0400 I WRITE [conn29] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } update: { $set: { x: 2.0 } } nscanned:0 nscannedObjects:4200 nMatched:42 nModified:37 keyUpdates:0 writeConflicts:0 numYields:2637 locks:{ Global: { acquireCount: { r: 2675, w: 2675 } }, Database: { acquireCount: { w: 2675 } }, Collection: { acquireCount: { w: 2638 } }, Metadata: { acquireCount: { w: 37 } }, oplog: { acquireCount: { w: 37 } } } 70385ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:33.670-0400 
m31100| 2015-07-09T14:02:33.669-0400 I COMMAND [conn29] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, u: { $set: { x: 2.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2675, w: 2675 } }, Database: { acquireCount: { w: 2675 } }, Collection: { acquireCount: { w: 2638 } }, Metadata: { acquireCount: { w: 37 } }, oplog: { acquireCount: { w: 37 } } } protocol:op_command 70385ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:33.695-0400 m30999| 2015-07-09T14:02:33.695-0400 I NETWORK [conn219] end connection 127.0.0.1:63343 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:34.503-0400 m30999| 2015-07-09T14:02:34.502-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:34.499-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:35.916-0400 m30998| 2015-07-09T14:02:35.915-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:35.912-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:36.821-0400 m31100| 2015-07-09T14:02:36.820-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:36.818-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:37.245-0400 m31200| 2015-07-09T14:02:37.244-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:02:37.241-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:42.664-0400 m31100| 2015-07-09T14:02:42.663-0400 I WRITE [conn23] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } update: { $set: { x: 0.0 } } nscanned:0 nscannedObjects:4300 nMatched:44 nModified:41 keyUpdates:0 writeConflicts:0 numYields:2630 locks:{ Global: { acquireCount: { r: 2672, w: 2672 } }, Database: { acquireCount: { w: 2672 } }, Collection: { acquireCount: { w: 2631 } }, Metadata: { acquireCount: { w: 41 } }, oplog: { acquireCount: { w: 41 } } } 68980ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:42.665-0400 m31100| 2015-07-09T14:02:42.663-0400 I COMMAND [conn23] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, u: { $set: { x: 0.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2672, w: 2672 } }, Database: { acquireCount: { w: 2672 } }, Collection: { acquireCount: { w: 2631 } }, Metadata: { acquireCount: { w: 41 } }, 
oplog: { acquireCount: { w: 41 } } } protocol:op_command 68981ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:57.654-0400 m31100| 2015-07-09T14:02:57.654-0400 I QUERY [conn136] getmore db34.coll34 query: { $where: "this.tid === 3" } cursorid:2351168864406 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1041 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 2084 } }, Database: { acquireCount: { r: 1042 } }, Collection: { acquireCount: { r: 1042 } } } 24828ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:59.169-0400 m31100| 2015-07-09T14:02:59.168-0400 I QUERY [conn135] getmore db34.coll34 query: { $where: "this.tid === 2" } cursorid:2351056979041 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1367 nreturned:299 reslen:13774 locks:{ Global: { acquireCount: { r: 2736 } }, Database: { acquireCount: { r: 1368 } }, Collection: { acquireCount: { r: 1368 } } } 33035ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:02:59.174-0400 m30999| 2015-07-09T14:02:59.174-0400 I NETWORK [conn217] end connection 127.0.0.1:63340 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:04.505-0400 m30999| 2015-07-09T14:03:04.504-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:04.502-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:05.918-0400 m30998| 2015-07-09T14:03:05.918-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:05.915-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:06.823-0400 m31100| 2015-07-09T14:03:06.823-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:06.820-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:07.246-0400 m31200| 2015-07-09T14:03:07.246-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:07.244-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:14.681-0400 m31100| 2015-07-09T14:03:14.680-0400 I WRITE [conn24] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } update: { $set: { x: 4.0 } } nscanned:0 nscannedObjects:4300 nMatched:43 nModified:38 keyUpdates:0 writeConflicts:0 numYields:2058 locks:{ Global: { acquireCount: { r: 2097, w: 2097 } }, Database: { acquireCount: { w: 2097 } }, Collection: { acquireCount: { w: 2059 } }, Metadata: { acquireCount: { w: 38 } }, oplog: { acquireCount: { w: 38 } } } 47958ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:14.682-0400 m31100| 2015-07-09T14:03:14.681-0400 I COMMAND [conn24] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, u: { $set: { x: 4.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 
keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 2097, w: 2097 } }, Database: { acquireCount: { w: 2097 } }, Collection: { acquireCount: { w: 2059 } }, Metadata: { acquireCount: { w: 38 } }, oplog: { acquireCount: { w: 38 } } } protocol:op_command 47958ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:14.687-0400 m30998| 2015-07-09T14:03:14.687-0400 I NETWORK [conn220] end connection 127.0.0.1:63348 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:15.270-0400 m31100| 2015-07-09T14:03:15.269-0400 I QUERY [conn48] query db34.coll34 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2349768062487 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:4101 keyUpdates:0 writeConflicts:0 numYields:1900 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 3802 } }, Database: { acquireCount: { r: 1901 } }, Collection: { acquireCount: { r: 1901 } } } 43766ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:16.681-0400 m31100| 2015-07-09T14:03:16.681-0400 I QUERY [conn136] getmore db34.coll34 query: { $where: "this.tid === 7" } cursorid:2349768062487 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:62 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 126 } }, Database: { acquireCount: { r: 63 } }, Collection: { acquireCount: { r: 63 } } } 1409ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:17.073-0400 m31100| 2015-07-09T14:03:17.072-0400 I QUERY [conn72] query db34.coll34 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2350733409916 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2301 keyUpdates:0 writeConflicts:0 numYields:884 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1770 } }, Database: { acquireCount: { r: 885 } }, Collection: { acquireCount: { r: 885 } } } 19413ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:22.347-0400 m31100| 2015-07-09T14:03:22.346-0400 I WRITE [conn23] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } update: { $set: { x: 9.0 } } nscanned:0 nscannedObjects:4300 nMatched:39 nModified:33 keyUpdates:0 writeConflicts:0 numYields:1754 locks:{ Global: { acquireCount: { r: 1788, w: 1788 } }, Database: { acquireCount: { w: 1788 } }, Collection: { acquireCount: { w: 1755 } }, Metadata: { acquireCount: { w: 33 } }, oplog: { acquireCount: { w: 33 } } } 39680ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:22.347-0400 m31100| 2015-07-09T14:03:22.346-0400 I COMMAND [conn23] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, u: { $set: { x: 9.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 1788, w: 1788 } }, Database: { acquireCount: { w: 1788 } }, Collection: { acquireCount: { w: 1755 } }, Metadata: { acquireCount: { w: 33 } }, oplog: { acquireCount: { w: 33 } } } protocol:op_command 39681ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:22.352-0400 m30999| 2015-07-09T14:03:22.351-0400 I NETWORK [conn221] end connection 127.0.0.1:63347 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:22.888-0400 m31100| 2015-07-09T14:03:22.888-0400 I QUERY [conn136] getmore 
db34.coll34 query: { $where: "this.tid === 3" } cursorid:2350733409916 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:265 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 532 } }, Database: { acquireCount: { r: 266 } }, Collection: { acquireCount: { r: 266 } } } 5813ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:22.930-0400 m31100| 2015-07-09T14:03:22.929-0400 I QUERY [conn48] query db34.coll34 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2351013917125 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:4101 keyUpdates:0 writeConflicts:0 numYields:298 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 598 } }, Database: { acquireCount: { r: 299 } }, Collection: { acquireCount: { r: 299 } } } 6243ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:22.952-0400 m30998| 2015-07-09T14:03:22.951-0400 I NETWORK [conn218] end connection 127.0.0.1:63344 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.232-0400 m31100| 2015-07-09T14:03:23.232-0400 I WRITE [conn24] update db34.coll34 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } update: { $set: { x: 3.0 } } nscanned:0 nscannedObjects:4400 nMatched:13 nModified:12 keyUpdates:0 writeConflicts:0 numYields:34 locks:{ Global: { acquireCount: { r: 47, w: 47 } }, Database: { acquireCount: { w: 47 } }, Collection: { acquireCount: { w: 35 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } 237ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.233-0400 m31100| 2015-07-09T14:03:23.232-0400 I COMMAND [conn24] command db34.$cmd command: update { update: "coll34", updates: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, u: { $set: { x: 3.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb638ca4787b9985d1ce3') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 47, w: 47 } }, Database: { acquireCount: { w: 47 } }, Collection: { acquireCount: { w: 35 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } protocol:op_command 237ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.239-0400 m30998| 2015-07-09T14:03:23.238-0400 I NETWORK [conn219] end connection 127.0.0.1:63346 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 jstests/concurrency/fsm_workloads/update_where.js: Workload completed in 307064 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.260-0400 m30999| 2015-07-09T14:03:23.260-0400 I COMMAND [conn1] DROP: db34.coll34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.261-0400 m30999| 2015-07-09T14:03:23.260-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:23.260-0400-559eb76bca4787b9985d1ce5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465003260), what: "dropCollection.start", ns: "db34.coll34", 
details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.326-0400 m30999| 2015-07-09T14:03:23.326-0400 I SHARDING [conn1] distributed lock 'db34.coll34/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76bca4787b9985d1ce6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.328-0400 m31100| 2015-07-09T14:03:23.327-0400 I COMMAND [conn15] CMD: drop db34.coll34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.331-0400 m31200| 2015-07-09T14:03:23.331-0400 I COMMAND [conn18] CMD: drop db34.coll34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.332-0400 m31102| 2015-07-09T14:03:23.331-0400 I COMMAND [repl writer worker 4] CMD: drop db34.coll34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.332-0400 m31101| 2015-07-09T14:03:23.331-0400 I COMMAND [repl writer worker 11] CMD: drop db34.coll34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.386-0400 m31100| 2015-07-09T14:03:23.386-0400 I SHARDING [conn15] remotely refreshing metadata for db34.coll34 with requested shard version 0|0||000000000000000000000000, current shard version is 1|10||559eb638ca4787b9985d1ce3, current metadata version is 1|10||559eb638ca4787b9985d1ce3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.388-0400 m31100| 2015-07-09T14:03:23.387-0400 W SHARDING [conn15] no chunks found when reloading db34.coll34, previous version was 0|0||559eb638ca4787b9985d1ce3, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.388-0400 m31100| 2015-07-09T14:03:23.388-0400 I SHARDING [conn15] dropping metadata for db34.coll34 at shard version 1|10||559eb638ca4787b9985d1ce3, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.391-0400 m30999| 2015-07-09T14:03:23.391-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:23.391-0400-559eb76bca4787b9985d1ce7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465003391), what: "dropCollection", ns: "db34.coll34", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.452-0400 m30999| 2015-07-09T14:03:23.451-0400 I SHARDING [conn1] distributed lock 'db34.coll34/bs-osx108-8:30999:1436464534:16807' unlocked. 
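With the workload finished, the harness tears down the namespace it used. Issued through mongos, the drop is serialized under a distributed lock, recorded as metadata events (the "about to log metadata event" lines) in the config servers, applied on each shard, and then replayed on the secondaries by the repl writer workers. A sketch of the equivalent shell sequence, assuming a connection to the mongos (the FSM runner drives this internally):

    // mongos takes the 'db34.coll34' distributed lock, issues the drop on
    // each shard (m31100, m31200), and logs dropCollection.start /
    // dropCollection metadata events
    db.getSiblingDB('db34').coll34.drop();

    // then the whole database goes; the dropDatabase entries from
    // m31101/m31102 below are the secondaries applying the replicated drop
    db.getSiblingDB('db34').dropDatabase();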
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.509-0400 m30999| 2015-07-09T14:03:23.508-0400 I COMMAND [conn1] DROP DATABASE: db34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.509-0400 m30999| 2015-07-09T14:03:23.508-0400 I SHARDING [conn1] DBConfig::dropDatabase: db34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.509-0400 m30999| 2015-07-09T14:03:23.508-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:23.508-0400-559eb76bca4787b9985d1ce8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465003508), what: "dropDatabase.start", ns: "db34", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.616-0400 m30999| 2015-07-09T14:03:23.615-0400 I SHARDING [conn1] DBConfig::dropDatabase: db34 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.616-0400 m31100| 2015-07-09T14:03:23.616-0400 I COMMAND [conn28] dropDatabase db34 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.616-0400 m31100| 2015-07-09T14:03:23.616-0400 I COMMAND [conn28] dropDatabase db34 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.617-0400 m30999| 2015-07-09T14:03:23.616-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:23.616-0400-559eb76bca4787b9985d1ce9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465003616), what: "dropDatabase", ns: "db34", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.617-0400 m31102| 2015-07-09T14:03:23.617-0400 I COMMAND [repl writer worker 8] dropDatabase db34 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.618-0400 m31102| 2015-07-09T14:03:23.617-0400 I COMMAND [repl writer worker 8] dropDatabase db34 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.618-0400 m31101| 2015-07-09T14:03:23.617-0400 I COMMAND [repl writer worker 3] dropDatabase db34 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.618-0400 m31101| 2015-07-09T14:03:23.617-0400 I COMMAND [repl writer worker 3] dropDatabase db34 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.733-0400 m31100| 2015-07-09T14:03:23.733-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.742-0400 m31102| 2015-07-09T14:03:23.741-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.742-0400 m31101| 2015-07-09T14:03:23.742-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.809-0400 m31200| 2015-07-09T14:03:23.808-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.813-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.813-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.813-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.813-0400 jstests/concurrency/fsm_workloads/explain_update.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.813-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.813-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.814-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.814-0400 m31202| 2015-07-09T14:03:23.812-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.814-0400 
m31201| 2015-07-09T14:03:23.812-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.821-0400 m30999| 2015-07-09T14:03:23.820-0400 I SHARDING [conn1] distributed lock 'db35/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76bca4787b9985d1cea [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.825-0400 m30999| 2015-07-09T14:03:23.825-0400 I SHARDING [conn1] Placing [db35] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.825-0400 m30999| 2015-07-09T14:03:23.825-0400 I SHARDING [conn1] Enabling sharding for database [db35] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.880-0400 m30999| 2015-07-09T14:03:23.879-0400 I SHARDING [conn1] distributed lock 'db35/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.904-0400 m31100| 2015-07-09T14:03:23.904-0400 I INDEX [conn23] build index on: db35.coll35 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db35.coll35" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.905-0400 m31100| 2015-07-09T14:03:23.904-0400 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.919-0400 m31100| 2015-07-09T14:03:23.918-0400 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.919-0400 m30999| 2015-07-09T14:03:23.919-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db35.coll35", key: { j: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.923-0400 m30999| 2015-07-09T14:03:23.923-0400 I SHARDING [conn1] distributed lock 'db35.coll35/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76bca4787b9985d1ceb [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.924-0400 m30999| 2015-07-09T14:03:23.924-0400 I SHARDING [conn1] enable sharding on: db35.coll35 with shard key: { j: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.925-0400 m30999| 2015-07-09T14:03:23.924-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:23.924-0400-559eb76bca4787b9985d1cec", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465003924), what: "shardCollection.start", ns: "db35.coll35", details: { shardKey: { j: 1.0 }, collection: "db35.coll35", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.929-0400 m31101| 2015-07-09T14:03:23.929-0400 I INDEX [repl writer worker 7] build index on: db35.coll35 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db35.coll35" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.929-0400 m31101| 2015-07-09T14:03:23.929-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.935-0400 m31102| 2015-07-09T14:03:23.934-0400 I INDEX [repl writer worker 2] build index on: db35.coll35 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db35.coll35" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.935-0400 m31102| 2015-07-09T14:03:23.934-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.941-0400 m31101| 2015-07-09T14:03:23.941-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.946-0400 m31102| 2015-07-09T14:03:23.945-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:23.982-0400 m30999| 2015-07-09T14:03:23.981-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db35.coll35 using new epoch 559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.036-0400 m30999| 2015-07-09T14:03:24.036-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db35.coll35: 0ms sequenceNumber: 158 version: 1|0||559eb76bca4787b9985d1ced based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.093-0400 m30999| 2015-07-09T14:03:24.092-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db35.coll35: 1ms sequenceNumber: 159 version: 1|0||559eb76bca4787b9985d1ced based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.095-0400 m31100| 2015-07-09T14:03:24.094-0400 I SHARDING [conn60] remotely refreshing metadata for db35.coll35 with requested shard version 1|0||559eb76bca4787b9985d1ced, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.096-0400 m31100| 2015-07-09T14:03:24.096-0400 I SHARDING [conn60] collection db35.coll35 was previously unsharded, new metadata loaded with shard version 1|0||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.097-0400 m31100| 2015-07-09T14:03:24.096-0400 I SHARDING [conn60] collection version was loaded at version 1|0||559eb76bca4787b9985d1ced, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.097-0400 m30999| 2015-07-09T14:03:24.097-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:24.097-0400-559eb76cca4787b9985d1cee", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465004097), what: "shardCollection", ns: "db35.coll35", details: { version: "1|0||559eb76bca4787b9985d1ced" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.152-0400 m30999| 2015-07-09T14:03:24.151-0400 I SHARDING [conn1] distributed lock 'db35.coll35/bs-osx108-8:30999:1436464534:16807' unlocked. 
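The next workload, explain_update.js, gets a fresh sharded namespace: the harness builds an index on the shard key, enables sharding on db35, and shards coll35, which starts life as a single chunk from MinKey to MaxKey on test-rs0. A sketch of the equivalent setup through mongos, assuming the standard sh.* shell helpers:

    sh.enableSharding('db35');                             // "Enabling sharding for database [db35]"
    db.getSiblingDB('db35').coll35.createIndex({ j: 1 });  // shard key must be indexed
    sh.shardCollection('db35.coll35', { j: 1 });           // one chunk: { j: MinKey } -->> { j: MaxKey }

Because every insert initially lands in that single chunk, the ten threads immediately trigger the burst of "request split points lookup" / splitChunk requests that follows; only one request at a time can take the collection lock, so the rest log "could not acquire collection lock" (code 125) and back off.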
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.153-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.232-0400 m30998| 2015-07-09T14:03:24.231-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63390 #222 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.249-0400 m30999| 2015-07-09T14:03:24.249-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63391 #222 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.266-0400 m30999| 2015-07-09T14:03:24.266-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63392 #223 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.288-0400 m30999| 2015-07-09T14:03:24.287-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63394 #224 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.288-0400 m30998| 2015-07-09T14:03:24.287-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63393 #223 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.288-0400 m30999| 2015-07-09T14:03:24.288-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63395 #225 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.288-0400 m30998| 2015-07-09T14:03:24.288-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63396 #224 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.298-0400 m30999| 2015-07-09T14:03:24.297-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63397 #226 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.298-0400 m30998| 2015-07-09T14:03:24.298-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63398 #225 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.301-0400 m30998| 2015-07-09T14:03:24.301-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63399 #226 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.310-0400 setting random seed: 9085548869334
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.310-0400 setting random seed: 2466841023415
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.310-0400 setting random seed: 2657142155803
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.310-0400 setting random seed: 5094066876918
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.311-0400 setting random seed: 361928674392
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.312-0400 setting random seed: 2438572859391
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.312-0400 setting random seed: 819918005727
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.314-0400 setting random seed: 197099847719
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.315-0400 setting random seed: 8588233385235
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.316-0400 setting random seed: 2935233344323
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.325-0400 m30998| 2015-07-09T14:03:24.324-0400 I SHARDING [conn223] ChunkManager: time to load chunks for db35.coll35: 0ms sequenceNumber: 41 version: 1|0||559eb76bca4787b9985d1ced based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.384-0400 m31100| 2015-07-09T14:03:24.383-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.388-0400 m31100| 2015-07-09T14:03:24.387-0400 I SHARDING [conn32] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.389-0400 m31100| 2015-07-09T14:03:24.389-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.392-0400 m31100| 2015-07-09T14:03:24.392-0400 I SHARDING [conn132] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.393-0400 m31100| 2015-07-09T14:03:24.392-0400 I SHARDING [conn32] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb67274991
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.395-0400 m31100| 2015-07-09T14:03:24.392-0400 I SHARDING [conn32] remotely refreshing metadata for db35.coll35 based on current shard version 1|0||559eb76bca4787b9985d1ced, current metadata version is 1|0||559eb76bca4787b9985d1ced
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.395-0400 m31100| 2015-07-09T14:03:24.392-0400 I SHARDING [conn36] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.395-0400 m31100| 2015-07-09T14:03:24.393-0400 I SHARDING [conn39] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.396-0400 m31100| 2015-07-09T14:03:24.394-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.396-0400 m31100| 2015-07-09T14:03:24.394-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.397-0400 m31100| 2015-07-09T14:03:24.395-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.398-0400 m31100| 2015-07-09T14:03:24.396-0400 I SHARDING [conn32] metadata of collection db35.coll35 already up to date (shard version : 1|0||559eb76bca4787b9985d1ced, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.398-0400 m31100| 2015-07-09T14:03:24.396-0400 I SHARDING [conn32] splitChunk accepted at version 1|0||559eb76bca4787b9985d1ced
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.399-0400 m31100| 2015-07-09T14:03:24.397-0400 W SHARDING [conn132] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.402-0400 m30998| 2015-07-09T14:03:24.397-0400 W SHARDING [conn224] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.403-0400 m31100| 2015-07-09T14:03:24.397-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.404-0400 m31100| 2015-07-09T14:03:24.398-0400 W SHARDING [conn36] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.405-0400 m30998| 2015-07-09T14:03:24.398-0400 W SHARDING [conn222] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.405-0400 m31100| 2015-07-09T14:03:24.398-0400 I SHARDING [conn35] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.406-0400 m31100| 2015-07-09T14:03:24.399-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.406-0400 m31100| 2015-07-09T14:03:24.399-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.407-0400 m31100| 2015-07-09T14:03:24.399-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:24.399-0400-559eb76c792e00bb67274992", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465004399), what: "multi-split", ns: "db35.coll35", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 1, of: 3, chunk: { min: { j: MinKey }, max: { j: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb76bca4787b9985d1ced') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.407-0400 m31100| 2015-07-09T14:03:24.401-0400 W SHARDING [conn39] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.408-0400 m31100| 2015-07-09T14:03:24.401-0400 W SHARDING [conn35] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.408-0400 m30998| 2015-07-09T14:03:24.401-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.409-0400 m30998| 2015-07-09T14:03:24.401-0400 W SHARDING [conn225] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.409-0400 m31100| 2015-07-09T14:03:24.402-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
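The burst of code: 125 failures above is expected contention, not a test error: ten FSM worker threads cross the split threshold almost simultaneously, both mongos routers (m30998 and m30999) ask the primary shard to autosplit the single [{ j: MinKey }, { j: MaxKey }) chunk, and only the conn32 request wins the per-collection distributed lock; every competing splitChunk is turned away with "Lock for splitting chunk ... is taken". For reference, the sketch below shows how the same split could be issued by hand through a router. It is a minimal illustration only, assuming a mongos reachable at localhost:30999; the test itself relies on autosplit, not manual splits.

    // Manually request the split that autosplit is attempting here.
    // The "split" admin command with a "middle" point is what sh.splitAt() wraps.
    var router = new Mongo("localhost:30999");  // assumed address, not taken from the log
    var res = router.getDB("admin").runCommand({ split: "db35.coll35", middle: { j: 6 } });
    printjson(res);  // ok: 1 on success; ok: 0 with an errmsg if the split lock is held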
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.411-0400 m30999| 2015-07-09T14:03:24.402-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.417-0400 m30998| 2015-07-09T14:03:24.415-0400 I SHARDING [conn226] ChunkManager: time to load chunks for db35.coll35: 0ms sequenceNumber: 42 version: 1|3||559eb76bca4787b9985d1ced based on: 1|0||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.429-0400 m31100| 2015-07-09T14:03:24.429-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.430-0400 m31100| 2015-07-09T14:03:24.429-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.433-0400 m31100| 2015-07-09T14:03:24.431-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.433-0400 m30999| 2015-07-09T14:03:24.431-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.433-0400 m31100| 2015-07-09T14:03:24.431-0400 I SHARDING [conn34] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.434-0400 m31100| 2015-07-09T14:03:24.432-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.435-0400 m31100| 2015-07-09T14:03:24.434-0400 W SHARDING [conn34] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.436-0400 m30999| 2015-07-09T14:03:24.435-0400 W SHARDING [conn225] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.445-0400 m31100| 2015-07-09T14:03:24.445-0400 I SHARDING [conn34] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.446-0400 m31100| 2015-07-09T14:03:24.445-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.448-0400 m31100| 2015-07-09T14:03:24.447-0400 W SHARDING [conn34] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.449-0400 m30999| 2015-07-09T14:03:24.447-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.452-0400 m31100| 2015-07-09T14:03:24.452-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:24.452-0400-559eb76c792e00bb67274993", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465004452), what: "multi-split", ns: "db35.coll35", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 2, of: 3, chunk: { min: { j: 0.0 }, max: { j: 6.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb76bca4787b9985d1ced') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.472-0400 m31100| 2015-07-09T14:03:24.471-0400 I SHARDING [conn34] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.472-0400 m31100| 2015-07-09T14:03:24.472-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.473-0400 m31100| 2015-07-09T14:03:24.472-0400 I SHARDING [conn38] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.473-0400 m31100| 2015-07-09T14:03:24.473-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.474-0400 m31100| 2015-07-09T14:03:24.473-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.475-0400 m31100| 2015-07-09T14:03:24.474-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.477-0400 m31100| 2015-07-09T14:03:24.476-0400 W SHARDING [conn34] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.477-0400 m30999| 2015-07-09T14:03:24.476-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.478-0400 m31100| 2015-07-09T14:03:24.477-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.478-0400 m30999| 2015-07-09T14:03:24.477-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.479-0400 m31100| 2015-07-09T14:03:24.479-0400 W SHARDING [conn38] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.480-0400 m30999| 2015-07-09T14:03:24.479-0400 W SHARDING [conn225] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.504-0400 m31100| 2015-07-09T14:03:24.504-0400 I SHARDING [conn38] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.505-0400 m31100| 2015-07-09T14:03:24.504-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.507-0400 m31100| 2015-07-09T14:03:24.505-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:24.505-0400-559eb76c792e00bb67274994", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465004505), what: "multi-split", ns: "db35.coll35", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 3, of: 3, chunk: { min: { j: 6.0 }, max: { j: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb76bca4787b9985d1ced') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.507-0400 m31100| 2015-07-09T14:03:24.505-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.507-0400 m31100| 2015-07-09T14:03:24.506-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.509-0400 m31100| 2015-07-09T14:03:24.508-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.509-0400 m31100| 2015-07-09T14:03:24.508-0400 W SHARDING [conn38] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.510-0400 m30999| 2015-07-09T14:03:24.509-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.511-0400 m30999| 2015-07-09T14:03:24.509-0400 W SHARDING [conn224] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.527-0400 m31100| 2015-07-09T14:03:24.525-0400 I SHARDING [conn38] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.527-0400 m31100| 2015-07-09T14:03:24.526-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.528-0400 m31100| 2015-07-09T14:03:24.528-0400 W SHARDING [conn38] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.529-0400 m30999| 2015-07-09T14:03:24.528-0400 W SHARDING [conn222] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.529-0400 m31100| 2015-07-09T14:03:24.528-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.530-0400 m31100| 2015-07-09T14:03:24.529-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.533-0400 m31100| 2015-07-09T14:03:24.532-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.535-0400 m30999| 2015-07-09T14:03:24.532-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.561-0400 m31100| 2015-07-09T14:03:24.560-0400 I SHARDING [conn32] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.561-0400 m31100| 2015-07-09T14:03:24.560-0400 I COMMAND [conn32] command db35.coll35 command: splitChunk { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 489 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1306 } } } protocol:op_command 171ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.564-0400 m30998| 2015-07-09T14:03:24.564-0400 I SHARDING [conn223] autosplitted db35.coll35 shard: ns: db35.coll35, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { j: MinKey }, max: { j: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.568-0400 m31100| 2015-07-09T14:03:24.566-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.568-0400 m31100| 2015-07-09T14:03:24.567-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.570-0400 m31100| 2015-07-09T14:03:24.570-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb67274995 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.570-0400 m31100| 2015-07-09T14:03:24.570-0400 I SHARDING [conn15] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.571-0400 m31100| 2015-07-09T14:03:24.571-0400 I SHARDING [conn15] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.572-0400 m31100| 2015-07-09T14:03:24.571-0400 W SHARDING [conn15] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.573-0400 m31100| 2015-07-09T14:03:24.572-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
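At this point the winning split has committed: the three "multi-split" metadata events (number 1 through 3 of 3) cover [MinKey, 0), [0, 6) and [6, MaxKey), the COMMAND summary shows the whole splitChunk took 171ms (including about 1.3ms waiting on the collection W lock), and m30998 reports the chunk autosplitted into 3 pieces at splitThreshold 921. These multi-split events are persisted in the config servers' changelog, so they can be audited after the run; a small query sketch, assuming the 3.x config schema in which entries carry the what and ns fields printed above:

    // Replay the split history for db35.coll35 from the config changelog.
    var cfg = db.getSiblingDB("config");
    cfg.changelog.find({ what: "multi-split", ns: "db35.coll35" })
       .sort({ time: 1 })
       .forEach(printjson);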
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.574-0400 m30999| 2015-07-09T14:03:24.573-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.589-0400 m31100| 2015-07-09T14:03:24.589-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.590-0400 m31100| 2015-07-09T14:03:24.589-0400 I SHARDING [conn38] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.591-0400 m31100| 2015-07-09T14:03:24.590-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.591-0400 m31100| 2015-07-09T14:03:24.590-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.593-0400 m31100| 2015-07-09T14:03:24.593-0400 I SHARDING [conn34] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.594-0400 m31100| 2015-07-09T14:03:24.593-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.596-0400 m31100| 2015-07-09T14:03:24.595-0400 I SHARDING [conn34] could not acquire lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.597-0400 m31100| 2015-07-09T14:03:24.595-0400 I SHARDING [conn34] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' was not acquired. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.597-0400 m31100| 2015-07-09T14:03:24.595-0400 I SHARDING [conn38] could not acquire lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.597-0400 m31100| 2015-07-09T14:03:24.595-0400 W SHARDING [conn34] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.597-0400 m31100| 2015-07-09T14:03:24.595-0400 I SHARDING [conn38] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.598-0400 m30999| 2015-07-09T14:03:24.596-0400 W SHARDING [conn222] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.598-0400 m31100| 2015-07-09T14:03:24.596-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb67274996 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.598-0400 m31100| 2015-07-09T14:03:24.596-0400 W SHARDING [conn38] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.599-0400 m31100| 2015-07-09T14:03:24.596-0400 I SHARDING [conn40] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.599-0400 m30999| 2015-07-09T14:03:24.598-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.600-0400 m31100| 2015-07-09T14:03:24.597-0400 I SHARDING [conn15] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.602-0400 m31100| 2015-07-09T14:03:24.601-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.604-0400 m31100| 2015-07-09T14:03:24.603-0400 W SHARDING [conn40] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.605-0400 m30999| 2015-07-09T14:03:24.604-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.606-0400 m31100| 2015-07-09T14:03:24.604-0400 I SHARDING [conn15] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.606-0400 m31100| 2015-07-09T14:03:24.604-0400 W SHARDING [conn15] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.606-0400 m31100| 2015-07-09T14:03:24.605-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.607-0400 m30999| 2015-07-09T14:03:24.606-0400 W SHARDING [conn224] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 22.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.628-0400 m31100| 2015-07-09T14:03:24.627-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.634-0400 m31100| 2015-07-09T14:03:24.630-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.635-0400 m31100| 2015-07-09T14:03:24.635-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb67274999 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.637-0400 m31100| 2015-07-09T14:03:24.635-0400 I SHARDING [conn15] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.642-0400 m31100| 2015-07-09T14:03:24.638-0400 I SHARDING [conn15] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.642-0400 m31100| 2015-07-09T14:03:24.638-0400 W SHARDING [conn15] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.642-0400 m31100| 2015-07-09T14:03:24.639-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
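The failure mode has now shifted: the shard's metadata is at version 1|3, so any splitChunk that still describes the pre-split chunk [{ : MinKey }, { : MaxKey }) is rejected with "splitChunk cannot find chunk ... the chunk boundaries may be stale" instead of losing the lock. Nothing is wrong; the routers keep proposing splits against chunk boundaries they cached before the commit, and they self-correct on their next ChunkManager refresh (as m30998 already did at sequenceNumber 42). The authoritative boundaries live in config.chunks; an inspection sketch, again assuming the 3.x schema keyed by ns:

    // List the committed chunk ranges for db35.coll35.
    var cfg = db.getSiblingDB("config");
    cfg.chunks.find({ ns: "db35.coll35" }, { min: 1, max: 1, lastmod: 1 })
       .sort({ min: 1 })
       .forEach(printjson);
    // After the 3-way split this should show [MinKey, 0), [0, 6) and [6, MaxKey).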
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.643-0400 m31100| 2015-07-09T14:03:24.639-0400 I SHARDING [conn40] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.644-0400 m31100| 2015-07-09T14:03:24.640-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.644-0400 m30999| 2015-07-09T14:03:24.641-0400 W SHARDING [conn222] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.645-0400 m31100| 2015-07-09T14:03:24.642-0400 I SHARDING [conn40] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb6727499a [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.645-0400 m31100| 2015-07-09T14:03:24.642-0400 I SHARDING [conn40] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.648-0400 m31100| 2015-07-09T14:03:24.648-0400 I SHARDING [conn40] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.649-0400 m31100| 2015-07-09T14:03:24.648-0400 W SHARDING [conn40] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.651-0400 m31100| 2015-07-09T14:03:24.650-0400 I SHARDING [conn40] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.651-0400 m30999| 2015-07-09T14:03:24.650-0400 W SHARDING [conn225] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.668-0400 m31100| 2015-07-09T14:03:24.668-0400 I SHARDING [conn40] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.670-0400 m31100| 2015-07-09T14:03:24.668-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.672-0400 m31100| 2015-07-09T14:03:24.671-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.674-0400 m31100| 2015-07-09T14:03:24.671-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.674-0400 m31100| 2015-07-09T14:03:24.673-0400 I SHARDING [conn40] could not acquire lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.675-0400 m31100| 2015-07-09T14:03:24.673-0400 I SHARDING [conn40] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.675-0400 m31100| 2015-07-09T14:03:24.673-0400 W SHARDING [conn40] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.676-0400 m31100| 2015-07-09T14:03:24.673-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb6727499c [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.676-0400 m31100| 2015-07-09T14:03:24.673-0400 I SHARDING [conn15] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.677-0400 m30999| 2015-07-09T14:03:24.673-0400 W SHARDING [conn222] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.678-0400 m31100| 2015-07-09T14:03:24.678-0400 I SHARDING [conn15] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.679-0400 m31100| 2015-07-09T14:03:24.678-0400 W SHARDING [conn15] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.680-0400 m31100| 2015-07-09T14:03:24.679-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
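The paired "could not acquire lock ... (another update won)" and "distributed lock ... was not acquired" messages expose the lock arbitration itself: each contender races to update the lock document, and the losers back off immediately rather than block. In this release the lock is an ordinary document in config.locks, keyed by the namespace and stamped with the ts values (559eb76c792e00bb672749xx) seen throughout the log. A quick way to look at it, assuming the legacy 3.x distributed-lock schema:

    // Inspect the distributed lock document guarding db35.coll35.
    var cfg = db.getSiblingDB("config");
    printjson(cfg.locks.findOne({ _id: "db35.coll35" }));
    // Fields in this era include state (0 = free, 2 = held), process, ts, who and why.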
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.681-0400 m30999| 2015-07-09T14:03:24.679-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.706-0400 m31100| 2015-07-09T14:03:24.703-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.707-0400 m31100| 2015-07-09T14:03:24.703-0400 I SHARDING [conn40] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.708-0400 m31100| 2015-07-09T14:03:24.704-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.708-0400 m31100| 2015-07-09T14:03:24.705-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.708-0400 m31100| 2015-07-09T14:03:24.706-0400 I SHARDING [conn15] could not acquire lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.708-0400 m31100| 2015-07-09T14:03:24.706-0400 I SHARDING [conn15] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.709-0400 m31100| 2015-07-09T14:03:24.706-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.710-0400 m30999| 2015-07-09T14:03:24.706-0400 W SHARDING [conn226] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.710-0400 m31100| 2015-07-09T14:03:24.707-0400 I SHARDING [conn40] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb6727499d [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.710-0400 m31100| 2015-07-09T14:03:24.707-0400 I SHARDING [conn40] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.711-0400 m31100| 2015-07-09T14:03:24.708-0400 I SHARDING [conn40] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.711-0400 m31100| 2015-07-09T14:03:24.708-0400 W SHARDING [conn40] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.711-0400 m31100| 2015-07-09T14:03:24.709-0400 I SHARDING [conn40] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.712-0400 m30999| 2015-07-09T14:03:24.709-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.736-0400 m31100| 2015-07-09T14:03:24.736-0400 I SHARDING [conn40] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.743-0400 m31100| 2015-07-09T14:03:24.737-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.743-0400 m31100| 2015-07-09T14:03:24.739-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.743-0400 m31100| 2015-07-09T14:03:24.742-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.746-0400 m31100| 2015-07-09T14:03:24.745-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.746-0400 m30999| 2015-07-09T14:03:24.746-0400 W SHARDING [conn224] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 28.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.749-0400 m30999| 2015-07-09T14:03:24.749-0400 I NETWORK [conn222] end connection 127.0.0.1:63391 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.753-0400 m30998| 2015-07-09T14:03:24.753-0400 I NETWORK [conn222] end connection 127.0.0.1:63390 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.762-0400 m31100| 2015-07-09T14:03:24.761-0400 I SHARDING [conn40] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb6727499f [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.762-0400 m31100| 2015-07-09T14:03:24.761-0400 I SHARDING [conn40] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.762-0400 m31100| 2015-07-09T14:03:24.762-0400 I SHARDING [conn40] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.763-0400 m31100| 2015-07-09T14:03:24.762-0400 W SHARDING [conn40] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.763-0400 m31100| 2015-07-09T14:03:24.762-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.763-0400 m31100| 2015-07-09T14:03:24.763-0400 I SHARDING [conn40] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
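The "end connection" lines show the first FSM worker threads finishing while others are still issuing split attempts. Since any individual splitChunk may lose the lock race or see stale boundaries, a test that needs the collection to be split before proceeding should poll for the outcome rather than assert on a single attempt. A hypothetical helper in the jstest idiom (assert.soon is the shell's standard polling primitive; the threshold of 3 chunks is illustrative, matching the 3-way split above):

    // Hypothetical wait: block until db35.coll35 has been split at least once.
    assert.soon(function() {
        return db.getSiblingDB("config").chunks.count({ ns: "db35.coll35" }) >= 3;
    }, "db35.coll35 was never split into 3 or more chunks", 60 * 1000);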
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.765-0400 m31100| 2015-07-09T14:03:24.763-0400 I SHARDING [conn38] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.766-0400 m30999| 2015-07-09T14:03:24.763-0400 W SHARDING [conn225] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.768-0400 m31100| 2015-07-09T14:03:24.764-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.768-0400 m31100| 2015-07-09T14:03:24.764-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.768-0400 m31100| 2015-07-09T14:03:24.767-0400 I SHARDING [conn38] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb672749a0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.769-0400 m31100| 2015-07-09T14:03:24.767-0400 I SHARDING [conn38] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.769-0400 m31100| 2015-07-09T14:03:24.769-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. 
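The "distributed lock ... acquired/unlocked" lines map to documents in the config servers' config.locks collection; while one split holds the lock, the competing requests fail as shown. The holder can be inspected from a shell connected to a mongos (a sketch, assuming the 3.x config.locks schema):

    var locks = db.getSiblingDB("config").locks;
    locks.find({ _id: "db35.coll35" }).forEach(function(doc) {
        // state 2 means the lock is held; "who" and "why" identify the holder
        printjson({ state: doc.state, who: doc.who, why: doc.why });
    });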
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.770-0400 m30999| 2015-07-09T14:03:24.769-0400 W SHARDING [conn224] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.782-0400 m31100| 2015-07-09T14:03:24.779-0400 I SHARDING [conn38] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.782-0400 m31100| 2015-07-09T14:03:24.779-0400 W SHARDING [conn38] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.782-0400 m31100| 2015-07-09T14:03:24.780-0400 I SHARDING [conn38] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.784-0400 m30999| 2015-07-09T14:03:24.780-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.793-0400 m31100| 2015-07-09T14:03:24.791-0400 I SHARDING [conn38] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.793-0400 m31100| 2015-07-09T14:03:24.793-0400 I SHARDING [conn15] request split points lookup for chunk db35.coll35 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.794-0400 m31100| 2015-07-09T14:03:24.793-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.796-0400 m31100| 2015-07-09T14:03:24.794-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 
32.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.798-0400 m31100| 2015-07-09T14:03:24.796-0400 I SHARDING [conn38] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76c792e00bb672749a1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.799-0400 m31100| 2015-07-09T14:03:24.796-0400 I SHARDING [conn38] remotely refreshing metadata for db35.coll35 based on current shard version 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.799-0400 m31100| 2015-07-09T14:03:24.796-0400 W SHARDING [conn15] could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db35.coll35 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.799-0400 m30999| 2015-07-09T14:03:24.797-0400 W SHARDING [conn223] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db35.coll35 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.799-0400 m31100| 2015-07-09T14:03:24.797-0400 I SHARDING [conn38] metadata of collection db35.coll35 already up to date (shard version : 1|3||559eb76bca4787b9985d1ced, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.800-0400 m31100| 2015-07-09T14:03:24.797-0400 W SHARDING [conn38] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.801-0400 m31100| 2015-07-09T14:03:24.799-0400 I SHARDING [conn38] distributed lock 'db35.coll35/bs-osx108-8:31100:1436464536:197041335' unlocked. 
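From the workload's point of view none of this is fatal: the FSM threads simply attempt another split on a later iteration. A caller splitting by hand could poll the same way (an illustrative sketch, not part of the harness):

    for (var attempt = 0; attempt < 5; attempt++) {
        var r = db.adminCommand({ split: "db35.coll35", middle: { j: 20 } });
        if (r.ok === 1 || r.code !== 125) break; // retry only the code-125 races seen above
        sleep(100); // back off before the next attempt
    }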
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.803-0400 m30999| 2015-07-09T14:03:24.799-0400 W SHARDING [conn224] splitChunk failed - cmd: { splitChunk: "db35.coll35", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76bca4787b9985d1ced') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.829-0400 m30999| 2015-07-09T14:03:24.826-0400 I NETWORK [conn223] end connection 127.0.0.1:63392 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.838-0400 m30998| 2015-07-09T14:03:24.837-0400 I NETWORK [conn224] end connection 127.0.0.1:63396 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.852-0400 m30999| 2015-07-09T14:03:24.851-0400 I NETWORK [conn224] end connection 127.0.0.1:63394 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.879-0400 m30999| 2015-07-09T14:03:24.878-0400 I NETWORK [conn226] end connection 127.0.0.1:63397 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.883-0400 m30999| 2015-07-09T14:03:24.883-0400 I NETWORK [conn225] end connection 127.0.0.1:63395 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.883-0400 m30998| 2015-07-09T14:03:24.883-0400 I NETWORK [conn226] end connection 127.0.0.1:63399 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.914-0400 m30998| 2015-07-09T14:03:24.914-0400 I NETWORK [conn223] end connection 127.0.0.1:63393 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.923-0400 m30998| 2015-07-09T14:03:24.923-0400 I NETWORK [conn225] end connection 127.0.0.1:63398 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.923-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 jstests/concurrency/fsm_workloads/explain_update.js: Workload completed in 771 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 m30999| 2015-07-09T14:03:24.923-0400 I COMMAND [conn1] DROP: db35.coll35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.924-0400 m30999| 2015-07-09T14:03:24.923-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:24.923-0400-559eb76cca4787b9985d1cef", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465004923), what: "dropCollection.start", ns: "db35.coll35", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.990-0400 m30999| 2015-07-09T14:03:24.990-0400 I SHARDING [conn1] distributed lock 'db35.coll35/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76cca4787b9985d1cf0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.992-0400 m31100| 
2015-07-09T14:03:24.991-0400 I COMMAND [conn38] CMD: drop db35.coll35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.997-0400 m31200| 2015-07-09T14:03:24.997-0400 I COMMAND [conn18] CMD: drop db35.coll35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.999-0400 m31101| 2015-07-09T14:03:24.998-0400 I COMMAND [repl writer worker 6] CMD: drop db35.coll35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:24.999-0400 m31102| 2015-07-09T14:03:24.998-0400 I COMMAND [repl writer worker 15] CMD: drop db35.coll35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.053-0400 m31100| 2015-07-09T14:03:25.052-0400 I SHARDING [conn38] remotely refreshing metadata for db35.coll35 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eb76bca4787b9985d1ced, current metadata version is 1|3||559eb76bca4787b9985d1ced [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.055-0400 m31100| 2015-07-09T14:03:25.054-0400 W SHARDING [conn38] no chunks found when reloading db35.coll35, previous version was 0|0||559eb76bca4787b9985d1ced, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.055-0400 m31100| 2015-07-09T14:03:25.054-0400 I SHARDING [conn38] dropping metadata for db35.coll35 at shard version 1|3||559eb76bca4787b9985d1ced, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.057-0400 m30999| 2015-07-09T14:03:25.056-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:25.056-0400-559eb76dca4787b9985d1cf1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465005056), what: "dropCollection", ns: "db35.coll35", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.111-0400 m30999| 2015-07-09T14:03:25.111-0400 I SHARDING [conn1] distributed lock 'db35.coll35/bs-osx108-8:30999:1436464534:16807' unlocked. 
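The drop runs entirely through mongos: log the dropCollection.start event, take the distributed lock, drop on the primary shard (and, via the oplog, its secondaries), then discard the chunk metadata ("no chunks found ... this is a drop"). The shell equivalent, assuming a connection to one of the mongos processes:

    db.getSiblingDB("db35").coll35.drop();
    // after a clean drop, no chunks remain registered for the namespace
    assert.eq(0, db.getSiblingDB("config").chunks.count({ ns: "db35.coll35" }));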
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.169-0400 m30999| 2015-07-09T14:03:25.168-0400 I COMMAND [conn1] DROP DATABASE: db35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.169-0400 m30999| 2015-07-09T14:03:25.168-0400 I SHARDING [conn1] DBConfig::dropDatabase: db35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.169-0400 m30999| 2015-07-09T14:03:25.168-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:25.168-0400-559eb76dca4787b9985d1cf2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465005168), what: "dropDatabase.start", ns: "db35", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.277-0400 m30999| 2015-07-09T14:03:25.277-0400 I SHARDING [conn1] DBConfig::dropDatabase: db35 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.278-0400 m31100| 2015-07-09T14:03:25.277-0400 I COMMAND [conn28] dropDatabase db35 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.278-0400 m31100| 2015-07-09T14:03:25.278-0400 I COMMAND [conn28] dropDatabase db35 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.279-0400 m30999| 2015-07-09T14:03:25.278-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:25.278-0400-559eb76dca4787b9985d1cf3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465005278), what: "dropDatabase", ns: "db35", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.280-0400 m31102| 2015-07-09T14:03:25.279-0400 I COMMAND [repl writer worker 9] dropDatabase db35 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.280-0400 m31102| 2015-07-09T14:03:25.279-0400 I COMMAND [repl writer worker 9] dropDatabase db35 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.281-0400 m31101| 2015-07-09T14:03:25.279-0400 I COMMAND [repl writer worker 5] dropDatabase db35 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.281-0400 m31101| 2015-07-09T14:03:25.279-0400 I COMMAND [repl writer worker 5] dropDatabase db35 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.368-0400 m31100| 2015-07-09T14:03:25.368-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.374-0400 m31101| 2015-07-09T14:03:25.374-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.374-0400 m31102| 2015-07-09T14:03:25.374-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.404-0400 m31200| 2015-07-09T14:03:25.403-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.404-0400 m31202| 2015-07-09T14:03:25.404-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.405-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.405-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.405-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.405-0400 jstests/concurrency/fsm_workloads/indexed_insert_compound.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.405-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.405-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.406-0400 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:25.407-0400 m31201| 2015-07-09T14:03:25.407-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.413-0400 m30999| 2015-07-09T14:03:25.413-0400 I SHARDING [conn1] distributed lock 'db36/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76dca4787b9985d1cf4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.419-0400 m30999| 2015-07-09T14:03:25.418-0400 I SHARDING [conn1] Placing [db36] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.419-0400 m30999| 2015-07-09T14:03:25.418-0400 I SHARDING [conn1] Enabling sharding for database [db36] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.473-0400 m30999| 2015-07-09T14:03:25.473-0400 I SHARDING [conn1] distributed lock 'db36/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.503-0400 m31100| 2015-07-09T14:03:25.502-0400 I INDEX [conn23] build index on: db36.coll36 properties: { v: 1, key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, name: "indexed_insert_compound_x_1_indexed_insert_compound_y_1_indexed_insert_compound_z_1", ns: "db36.coll36" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.503-0400 m31100| 2015-07-09T14:03:25.502-0400 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.517-0400 m31100| 2015-07-09T14:03:25.517-0400 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.519-0400 m30999| 2015-07-09T14:03:25.519-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db36.coll36", key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.522-0400 m30999| 2015-07-09T14:03:25.522-0400 I SHARDING [conn1] distributed lock 'db36.coll36/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76dca4787b9985d1cf5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.524-0400 m30999| 2015-07-09T14:03:25.523-0400 I SHARDING [conn1] enable sharding on: db36.coll36 with shard key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.524-0400 m30999| 2015-07-09T14:03:25.523-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:25.523-0400-559eb76dca4787b9985d1cf6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465005523), what: "shardCollection.start", ns: "db36.coll36", details: { shardKey: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, collection: "db36.coll36", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.534-0400 m31101| 2015-07-09T14:03:25.532-0400 I INDEX [repl writer worker 9] build index on: db36.coll36 properties: { v: 1, key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, name: "indexed_insert_compound_x_1_indexed_insert_compound_y_1_indexed_insert_compound_z_1", ns: "db36.coll36" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.535-0400 m31101| 2015-07-09T14:03:25.533-0400 I INDEX [repl writer worker 9] building index using bulk 
method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.535-0400 m31102| 2015-07-09T14:03:25.533-0400 I INDEX [repl writer worker 11] build index on: db36.coll36 properties: { v: 1, key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, name: "indexed_insert_compound_x_1_indexed_insert_compound_y_1_indexed_insert_compound_z_1", ns: "db36.coll36" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.535-0400 m31102| 2015-07-09T14:03:25.533-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.547-0400 m31101| 2015-07-09T14:03:25.546-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.549-0400 m31102| 2015-07-09T14:03:25.548-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.578-0400 m30999| 2015-07-09T14:03:25.577-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db36.coll36 using new epoch 559eb76dca4787b9985d1cf7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.631-0400 m30999| 2015-07-09T14:03:25.631-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db36.coll36: 0ms sequenceNumber: 160 version: 1|0||559eb76dca4787b9985d1cf7 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.688-0400 m30999| 2015-07-09T14:03:25.687-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db36.coll36: 0ms sequenceNumber: 161 version: 1|0||559eb76dca4787b9985d1cf7 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.690-0400 m31100| 2015-07-09T14:03:25.689-0400 I SHARDING [conn47] remotely refreshing metadata for db36.coll36 with requested shard version 1|0||559eb76dca4787b9985d1cf7, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.692-0400 m31100| 2015-07-09T14:03:25.691-0400 I SHARDING [conn47] collection db36.coll36 was previously unsharded, new metadata loaded with shard version 1|0||559eb76dca4787b9985d1cf7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.692-0400 m31100| 2015-07-09T14:03:25.691-0400 I SHARDING [conn47] collection version was loaded at version 1|0||559eb76dca4787b9985d1cf7, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.692-0400 m30999| 2015-07-09T14:03:25.691-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:25.691-0400-559eb76dca4787b9985d1cf8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465005691), what: "shardCollection", ns: "db36.coll36", details: { version: "1|0||559eb76dca4787b9985d1cf7" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.747-0400 m30999| 2015-07-09T14:03:25.746-0400 I SHARDING [conn1] distributed lock 'db36.coll36/bs-osx108-8:30999:1436464534:16807' unlocked. 
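Setup for indexed_insert_compound.js follows the usual per-workload pattern: build the compound index on the primary shard, let it replicate, then shard the collection on the same key. A sketch of the equivalent shell calls (the sh helpers wrap the enableSharding and shardcollection commands logged above):

    sh.enableSharding("db36");
    db.getSiblingDB("db36").coll36.createIndex({
        indexed_insert_compound_x: 1,
        indexed_insert_compound_y: 1,
        indexed_insert_compound_z: 1
    });
    sh.shardCollection("db36.coll36", {
        indexed_insert_compound_x: 1,
        indexed_insert_compound_y: 1,
        indexed_insert_compound_z: 1
    });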
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.770-0400 m31200| 2015-07-09T14:03:25.769-0400 I INDEX [conn41] build index on: db36.coll36 properties: { v: 1, key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, name: "indexed_insert_compound_x_1_indexed_insert_compound_y_1_indexed_insert_compound_z_1", ns: "db36.coll36" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.770-0400 m31200| 2015-07-09T14:03:25.769-0400 I INDEX [conn41] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.780-0400 m31200| 2015-07-09T14:03:25.780-0400 I INDEX [conn41] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.781-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.901-0400 m31201| 2015-07-09T14:03:25.895-0400 I INDEX [repl writer worker 3] build index on: db36.coll36 properties: { v: 1, key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, name: "indexed_insert_compound_x_1_indexed_insert_compound_y_1_indexed_insert_compound_z_1", ns: "db36.coll36" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.901-0400 m31201| 2015-07-09T14:03:25.895-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.906-0400 m30998| 2015-07-09T14:03:25.906-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63400 #227 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.917-0400 m30998| 2015-07-09T14:03:25.917-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63401 #228 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.936-0400 m31202| 2015-07-09T14:03:25.931-0400 I INDEX [repl writer worker 9] build index on: db36.coll36 properties: { v: 1, key: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, name: "indexed_insert_compound_x_1_indexed_insert_compound_y_1_indexed_insert_compound_z_1", ns: "db36.coll36" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.936-0400 m31202| 2015-07-09T14:03:25.931-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.940-0400 m31201| 2015-07-09T14:03:25.939-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.940-0400 m30999| 2015-07-09T14:03:25.940-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63402 #227 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.973-0400 m31202| 2015-07-09T14:03:25.973-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.994-0400 m30999| 2015-07-09T14:03:25.993-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63403 #228 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.995-0400 m30998| 2015-07-09T14:03:25.994-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63404 #229 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.995-0400 m30999| 2015-07-09T14:03:25.994-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63406 #229 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:25.996-0400 m30999| 2015-07-09T14:03:25.996-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63407 #230 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.004-0400 m30999| 2015-07-09T14:03:26.004-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63408 #231 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.005-0400 m30998| 2015-07-09T14:03:26.004-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63405 #230 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.009-0400 m30998| 2015-07-09T14:03:26.008-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63410 #231 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.009-0400 m30999| 2015-07-09T14:03:26.009-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63409 #232 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.014-0400 m30998| 2015-07-09T14:03:26.014-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63411 #232 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.015-0400 m30998| 2015-07-09T14:03:26.014-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63412 #233 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.019-0400 m30998| 2015-07-09T14:03:26.019-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63414 #234 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.019-0400 m30999| 2015-07-09T14:03:26.019-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63413 #233 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.020-0400 m30999| 2015-07-09T14:03:26.019-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63416 #234 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.021-0400 m30999| 2015-07-09T14:03:26.021-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63418 #235 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.027-0400 m30998| 2015-07-09T14:03:26.021-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63415 #235 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.027-0400 m30999| 2015-07-09T14:03:26.021-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63419 #236 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.027-0400 m30998| 2015-07-09T14:03:26.021-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63417 #236 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.034-0400 setting random seed: 150148575194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.034-0400 setting 
random seed: 6765170623548 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.034-0400 setting random seed: 1794031010940 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.035-0400 setting random seed: 5676989126950 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.036-0400 setting random seed: 2563919438980 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.038-0400 setting random seed: 3156301085837 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.038-0400 setting random seed: 3547104462049 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.042-0400 m30998| 2015-07-09T14:03:26.041-0400 I SHARDING [conn228] ChunkManager: time to load chunks for db36.coll36: 0ms sequenceNumber: 43 version: 1|0||559eb76dca4787b9985d1cf7 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.042-0400 setting random seed: 6979572614654 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.044-0400 setting random seed: 6184721430763 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.048-0400 setting random seed: 5786432074382 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.060-0400 setting random seed: 4054678380489 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.061-0400 setting random seed: 7304175039753 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.062-0400 setting random seed: 5982040227390 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.063-0400 setting random seed: 2222722922451 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.063-0400 m31100| 2015-07-09T14:03:26.063-0400 I SHARDING [conn32] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.065-0400 m31100| 2015-07-09T14:03:26.064-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.067-0400 setting random seed: 1269950298592 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.068-0400 m31100| 2015-07-09T14:03:26.068-0400 I SHARDING [conn35] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.068-0400 m31100| 2015-07-09T14:03:26.068-0400 I SHARDING [conn15] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.071-0400 m31100| 2015-07-09T14:03:26.068-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: 
MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.071-0400 m31100| 2015-07-09T14:03:26.069-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.072-0400 m31100| 2015-07-09T14:03:26.069-0400 I SHARDING [conn32] distributed lock 'db36.coll36/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76e792e00bb672749a3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.072-0400 m31100| 2015-07-09T14:03:26.069-0400 I SHARDING [conn32] remotely refreshing metadata for db36.coll36 based on current shard version 1|0||559eb76dca4787b9985d1cf7, current metadata version is 1|0||559eb76dca4787b9985d1cf7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.072-0400 m31100| 2015-07-09T14:03:26.070-0400 I SHARDING [conn38] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.074-0400 m31100| 2015-07-09T14:03:26.070-0400 I SHARDING [conn39] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.074-0400 setting random seed: 5556002175435 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.075-0400 m30998| 2015-07-09T14:03:26.071-0400 W SHARDING [conn227] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.076-0400 
m30999| 2015-07-09T14:03:26.071-0400 W SHARDING [conn228] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.076-0400 m31100| 2015-07-09T14:03:26.071-0400 W SHARDING [conn15] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.076-0400 m31100| 2015-07-09T14:03:26.071-0400 W SHARDING [conn35] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.076-0400 setting random seed: 6613135244697 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.076-0400 m31100| 2015-07-09T14:03:26.075-0400 I SHARDING [conn32] metadata of collection db36.coll36 already up to date (shard version : 1|0||559eb76dca4787b9985d1cf7, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.076-0400 m31100| 2015-07-09T14:03:26.075-0400 I SHARDING [conn32] splitChunk accepted at version 1|0||559eb76dca4787b9985d1cf7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.077-0400 m31100| 2015-07-09T14:03:26.076-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.080-0400 m31100| 2015-07-09T14:03:26.077-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, 
indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.080-0400 m31100| 2015-07-09T14:03:26.077-0400 W SHARDING [conn38] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.081-0400 m31100| 2015-07-09T14:03:26.078-0400 I SHARDING [conn15] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.081-0400 m31100| 2015-07-09T14:03:26.078-0400 I SHARDING [conn40] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.081-0400 m30999| 2015-07-09T14:03:26.078-0400 W SHARDING [conn231] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.082-0400 m31100| 2015-07-09T14:03:26.079-0400 W SHARDING [conn39] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.082-0400 m30998| 2015-07-09T14:03:26.079-0400 W SHARDING [conn228] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.084-0400 m30998| 2015-07-09T14:03:26.084-0400 I SHARDING [conn231] ChunkManager: time to load chunks for db36.coll36: 2ms sequenceNumber: 44 version: 1|3||559eb76dca4787b9985d1cf7 based on: 1|0||559eb76dca4787b9985d1cf7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.087-0400 m31100| 2015-07-09T14:03:26.086-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:26.086-0400-559eb76e792e00bb672749a4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465006086), what: "multi-split", ns: "db36.coll36", details: { before: { min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey } }, number: 1, of: 3, chunk: { min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb76dca4787b9985d1cf7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.087-0400 m31100| 2015-07-09T14:03:26.086-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.088-0400 m31100| 2015-07-09T14:03:26.087-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } 
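One contender (conn32) won the lock race: "splitChunk accepted at version 1|0||..." above, followed by the multi-split metadata events (chunk 1 of 3); the other requests keep failing with code 125 until the metadata refreshes. The resulting chunk layout can be read back from the config metadata through a mongos (a sketch):

    db.getSiblingDB("config").chunks.find({ ns: "db36.coll36" }).sort({ min: 1 })
        .forEach(function(c) {
            printjson({ min: c.min, max: c.max, shard: c.shard, lastmod: c.lastmod });
        });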
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.089-0400 m31100| 2015-07-09T14:03:26.087-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.089-0400 m31100| 2015-07-09T14:03:26.088-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "4" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.090-0400 m31100| 2015-07-09T14:03:26.089-0400 W SHARDING [conn15] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.091-0400 m30999| 2015-07-09T14:03:26.090-0400 W SHARDING [conn236] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.091-0400 m31100| 2015-07-09T14:03:26.090-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.092-0400 m30999| 2015-07-09T14:03:26.090-0400 W SHARDING [conn232] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "4" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.092-0400 m31100| 2015-07-09T14:03:26.091-0400 W SHARDING [conn40] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.092-0400 m30999| 2015-07-09T14:03:26.091-0400 W SHARDING [conn235] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "1" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.096-0400 setting random seed: 6964109381660 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.098-0400 m31100| 2015-07-09T14:03:26.098-0400 I SHARDING [conn40] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.100-0400 m31100| 2015-07-09T14:03:26.098-0400 I SHARDING [conn15] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.100-0400 m31100| 2015-07-09T14:03:26.098-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.101-0400 m31100| 2015-07-09T14:03:26.099-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." 
} ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.102-0400 m31100| 2015-07-09T14:03:26.099-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.103-0400 m31100| 2015-07-09T14:03:26.099-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.103-0400 m31100| 2015-07-09T14:03:26.100-0400 W SHARDING [conn40] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.105-0400 m30999| 2015-07-09T14:03:26.101-0400 W SHARDING [conn230] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.108-0400 m30999| 2015-07-09T14:03:26.101-0400 W SHARDING [conn231] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.109-0400 m31100| 2015-07-09T14:03:26.101-0400 W SHARDING [conn15] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.109-0400 m31100| 2015-07-09T14:03:26.101-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.110-0400 m30999| 2015-07-09T14:03:26.102-0400 W SHARDING [conn229] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.110-0400 setting random seed: 2279214919544 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.112-0400 setting random seed: 7952374364249 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.116-0400 m31100| 2015-07-09T14:03:26.115-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.119-0400 m31100| 2015-07-09T14:03:26.116-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.120-0400 m31100| 2015-07-09T14:03:26.117-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.124-0400 m30999| 2015-07-09T14:03:26.117-0400 W SHARDING [conn228] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.124-0400 m31100| 2015-07-09T14:03:26.120-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.125-0400 m31100| 2015-07-09T14:03:26.121-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 5.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "&" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.129-0400 m31100| 2015-07-09T14:03:26.122-0400 I SHARDING [conn15] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.130-0400 m31100| 2015-07-09T14:03:26.122-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.132-0400 m30999| 2015-07-09T14:03:26.123-0400 W SHARDING [conn230] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 5.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "&" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.133-0400 m31100| 2015-07-09T14:03:26.123-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.133-0400 m31100| 2015-07-09T14:03:26.124-0400 W SHARDING [conn15] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. 
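Every one of the splitChunk failures above carries code 125 (LockBusy). The suite drives inserts through two mongos routers (m30999, m30998), and each independently asks the shard primary (m31100) to autosplit the same [MinKey, MaxKey) chunk of db36.coll36; whichever connection takes the collection's distributed lock first proceeds, and the rest fail with "could not acquire collection lock". Under concurrent load this is expected noise, not an error. A minimal shell sketch of issuing the same split by hand through a mongos and backing off while the lock is held (the middle key is copied from one of the splitKeys vectors above; the retry policy is illustrative, not part of the test):

    // Hypothetical manual split with backoff on LockBusy (code 125).
    var middle = { indexed_insert_compound_x: 7.0,
                   indexed_insert_compound_y: 0.0,
                   indexed_insert_compound_z: "(" };
    for (var attempt = 1; attempt <= 5; attempt++) {
        var res = db.adminCommand({ split: "db36.coll36", middle: middle });
        if (res.ok) {
            break;                  // split committed
        }
        if (res.code !== 125) {
            printjson(res);         // a real failure, not lock contention
            break;
        }
        sleep(100 * attempt);       // another thread holds the lock: retry
    }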
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.134-0400 m30999| 2015-07-09T14:03:26.124-0400 W SHARDING [conn227] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.134-0400 m31100| 2015-07-09T14:03:26.126-0400 I SHARDING [conn15] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.134-0400 m31100| 2015-07-09T14:03:26.126-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.135-0400 m31100| 2015-07-09T14:03:26.127-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "4" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.136-0400 m31100| 2015-07-09T14:03:26.127-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "4" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.136-0400 m31100| 2015-07-09T14:03:26.128-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.140-0400 m30999| 2015-07-09T14:03:26.128-0400 W SHARDING [conn232] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "4" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.140-0400 m31100| 2015-07-09T14:03:26.130-0400 W SHARDING [conn15] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.143-0400 m30999| 2015-07-09T14:03:26.130-0400 W SHARDING [conn229] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "4" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.144-0400 m31100| 2015-07-09T14:03:26.138-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:26.138-0400-559eb76e792e00bb672749a5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465006138), what: "multi-split", ns: "db36.coll36", details: { before: { min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey } }, number: 2, of: 3, chunk: { min: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, max: { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb76dca4787b9985d1cf7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.144-0400 m31100| 2015-07-09T14:03:26.139-0400 I SHARDING [conn15] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.144-0400 m31100| 2015-07-09T14:03:26.139-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.146-0400 m31100| 2015-07-09T14:03:26.139-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.148-0400 m31100| 2015-07-09T14:03:26.141-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.148-0400 m31100| 2015-07-09T14:03:26.141-0400 I SHARDING [conn40] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.150-0400 m31100| 2015-07-09T14:03:26.142-0400 W SHARDING [conn15] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. 
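Each "request split points lookup" line is the shard recomputing candidate split keys: it walks the shard-key index across the chunk's whole [MinKey, MaxKey) range and proposes keys that would carve the chunk into pieces below the size threshold. Because the workload threads are still inserting, the proposed splitKeys vectors differ slightly from one request to the next, which is why the failed splitChunk attempts above do not all agree on the same keys. A sketch of the same lookup issued by hand against the shard primary, assuming the 921-byte threshold that the successful autosplit reports further down:

    // Run against m31100 directly; splitVector is the command behind the
    // "request split points lookup" log lines. maxChunkSizeBytes here is
    // taken from the "splitThreshold 921" line below.
    db.adminCommand({
        splitVector: "db36.coll36",
        keyPattern: { indexed_insert_compound_x: 1,
                      indexed_insert_compound_y: 1,
                      indexed_insert_compound_z: 1 },
        min: { indexed_insert_compound_x: MinKey,
               indexed_insert_compound_y: MinKey,
               indexed_insert_compound_z: MinKey },
        max: { indexed_insert_compound_x: MaxKey,
               indexed_insert_compound_y: MaxKey,
               indexed_insert_compound_z: MaxKey },
        maxChunkSizeBytes: 921
    });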
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.152-0400 m30999| 2015-07-09T14:03:26.142-0400 W SHARDING [conn228] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.154-0400 m31100| 2015-07-09T14:03:26.142-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "3" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." 
}, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.154-0400 m31100| 2015-07-09T14:03:26.143-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.157-0400 m30999| 2015-07-09T14:03:26.143-0400 W SHARDING [conn227] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.157-0400 m31100| 2015-07-09T14:03:26.144-0400 W SHARDING [conn40] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.160-0400 m30999| 2015-07-09T14:03:26.145-0400 W SHARDING [conn234] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "3" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 13.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "." }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.183-0400 m31100| 2015-07-09T14:03:26.148-0400 I SHARDING [conn40] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.188-0400 m31100| 2015-07-09T14:03:26.149-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.188-0400 m31100| 2015-07-09T14:03:26.151-0400 W SHARDING [conn40] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.188-0400 m31100| 2015-07-09T14:03:26.151-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.189-0400 m30999| 2015-07-09T14:03:26.151-0400 W SHARDING [conn230] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.190-0400 m31100| 2015-07-09T14:03:26.152-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 9.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "*" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.191-0400 m30999| 2015-07-09T14:03:26.154-0400 W SHARDING [conn235] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 6.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "'" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 9.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "*" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.191-0400 m31100| 2015-07-09T14:03:26.153-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.192-0400 m31100| 2015-07-09T14:03:26.158-0400 I SHARDING [conn34] request split points lookup for chunk db36.coll36 { : MinKey, : MinKey, : MinKey } -->> { : MaxKey, : MaxKey, : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.200-0400 m31100| 2015-07-09T14:03:26.160-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 5.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "&" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.200-0400 m31100| 2015-07-09T14:03:26.162-0400 W SHARDING [conn34] could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db36.coll36 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.205-0400 m30999| 2015-07-09T14:03:26.162-0400 W SHARDING [conn228] splitChunk failed - cmd: { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 0.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "!" 
}, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: "2" }, { indexed_insert_compound_x: 2.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "#" }, { indexed_insert_compound_x: 3.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "$" }, { indexed_insert_compound_x: 4.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "%" }, { indexed_insert_compound_x: 5.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "&" }, { indexed_insert_compound_x: 7.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "(" }, { indexed_insert_compound_x: 8.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: ")" }, { indexed_insert_compound_x: 10.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "+" }, { indexed_insert_compound_x: 11.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "," }, { indexed_insert_compound_x: 12.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "-" }, { indexed_insert_compound_x: 14.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "/" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db36.coll36 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.205-0400 m30999| 2015-07-09T14:03:26.165-0400 I SHARDING [conn227] ChunkManager: time to load chunks for db36.coll36: 0ms sequenceNumber: 162 version: 1|3||559eb76dca4787b9985d1cf7 based on: 1|0||559eb76dca4787b9985d1cf7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.206-0400 m31100| 2015-07-09T14:03:26.192-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:26.192-0400-559eb76e792e00bb672749a6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465006192), what: "multi-split", ns: "db36.coll36", details: { before: { min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey } }, number: 3, of: 3, chunk: { min: { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb76dca4787b9985d1cf7') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.292-0400 m31100| 2015-07-09T14:03:26.246-0400 I SHARDING [conn32] distributed lock 'db36.coll36/bs-osx108-8:31100:1436464536:197041335' unlocked. 
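conn32 is the one connection that won the lock: it commits the three-way split, logging one "multi-split" changelog event per resulting chunk (events 2 of 3 and 3 of 3 appear above), which bumps the collection to version 1|3 (the ChunkManager reload on the mongos shows 1|3||… based on 1|0||…), and then releases the distributed lock. Those events are durable in the config database and can be read back; a short sketch, run through a mongos:

    // Read back the "multi-split" metadata events logged above.
    db.getSiblingDB("config").changelog
      .find({ what: "multi-split", ns: "db36.coll36" })
      .sort({ time: 1 })
      .forEach(printjson);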
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.293-0400 m31100| 2015-07-09T14:03:26.246-0400 I COMMAND [conn32] command db36.coll36 command: splitChunk { splitChunk: "db36.coll36", keyPattern: { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 1.0, indexed_insert_compound_z: 1.0 }, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_compound_x: 1.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: """ }, { indexed_insert_compound_x: 15.0, indexed_insert_compound_y: 0.0, indexed_insert_compound_z: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76dca4787b9985d1cf7') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:306 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 13966 } } } protocol:op_command 181ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.293-0400 m30998| 2015-07-09T14:03:26.247-0400 I SHARDING [conn233] autosplitted db36.coll36 shard: ns: db36.coll36, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { indexed_insert_compound_x: MinKey, indexed_insert_compound_y: MinKey, indexed_insert_compound_z: MinKey }, max: { indexed_insert_compound_x: MaxKey, indexed_insert_compound_y: MaxKey, indexed_insert_compound_z: MaxKey } into 3 (splitThreshold 921) (migrate suggested, but no migrations allowed) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.491-0400 m30998| 2015-07-09T14:03:26.489-0400 I NETWORK [conn227] end connection 127.0.0.1:63400 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.498-0400 m30999| 2015-07-09T14:03:26.498-0400 I NETWORK [conn227] end connection 127.0.0.1:63402 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.515-0400 m30999| 2015-07-09T14:03:26.514-0400 I NETWORK [conn232] end connection 127.0.0.1:63409 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.516-0400 m30998| 2015-07-09T14:03:26.516-0400 I NETWORK [conn228] end connection 127.0.0.1:63401 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.522-0400 m30999| 2015-07-09T14:03:26.522-0400 I NETWORK [conn228] end connection 127.0.0.1:63403 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.551-0400 m30998| 2015-07-09T14:03:26.548-0400 I NETWORK [conn232] end connection 127.0.0.1:63411 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.566-0400 m30999| 2015-07-09T14:03:26.565-0400 I NETWORK [conn230] end connection 127.0.0.1:63407 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.573-0400 m30999| 2015-07-09T14:03:26.570-0400 I NETWORK [conn236] end connection 127.0.0.1:63419 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.591-0400 m30999| 2015-07-09T14:03:26.588-0400 I NETWORK [conn235] end connection 127.0.0.1:63418 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.594-0400 m30998| 2015-07-09T14:03:26.594-0400 I NETWORK [conn236] end connection 127.0.0.1:63417 (7 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:26.607-0400 m30999| 2015-07-09T14:03:26.607-0400 I NETWORK [conn233] end connection 127.0.0.1:63413 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.608-0400 m30999| 2015-07-09T14:03:26.607-0400 I NETWORK [conn231] end connection 127.0.0.1:63408 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.618-0400 m30999| 2015-07-09T14:03:26.617-0400 I NETWORK [conn229] end connection 127.0.0.1:63406 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.624-0400 m30998| 2015-07-09T14:03:26.623-0400 I NETWORK [conn229] end connection 127.0.0.1:63404 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.629-0400 m30998| 2015-07-09T14:03:26.628-0400 I NETWORK [conn231] end connection 127.0.0.1:63410 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.638-0400 m30998| 2015-07-09T14:03:26.634-0400 I NETWORK [conn234] end connection 127.0.0.1:63414 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.666-0400 m30998| 2015-07-09T14:03:26.666-0400 I NETWORK [conn230] end connection 127.0.0.1:63405 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.699-0400 m30998| 2015-07-09T14:03:26.697-0400 I NETWORK [conn233] end connection 127.0.0.1:63412 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.712-0400 m30998| 2015-07-09T14:03:26.712-0400 I NETWORK [conn235] end connection 127.0.0.1:63415 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.717-0400 m30999| 2015-07-09T14:03:26.717-0400 I NETWORK [conn234] end connection 127.0.0.1:63416 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.737-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 jstests/concurrency/fsm_workloads/indexed_insert_compound.js: Workload completed in 957 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 m30999| 2015-07-09T14:03:26.738-0400 I COMMAND [conn1] DROP: db36.coll36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.738-0400 m30999| 2015-07-09T14:03:26.738-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:26.738-0400-559eb76eca4787b9985d1cf9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465006738), what: "dropCollection.start", ns: "db36.coll36", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.794-0400 m30999| 2015-07-09T14:03:26.793-0400 I SHARDING [conn1] distributed lock 'db36.coll36/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76eca4787b9985d1cfa [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.795-0400 m31100| 2015-07-09T14:03:26.794-0400 I COMMAND [conn34] CMD: drop db36.coll36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.798-0400 m31200| 2015-07-09T14:03:26.797-0400 I COMMAND [conn18] CMD: drop db36.coll36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.799-0400 m31101| 2015-07-09T14:03:26.799-0400 I COMMAND [repl writer worker 9] CMD: drop db36.coll36 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.799-0400 m31102| 2015-07-09T14:03:26.799-0400 I COMMAND [repl writer worker 8] CMD: drop db36.coll36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.802-0400 m31202| 2015-07-09T14:03:26.802-0400 I COMMAND [repl writer worker 5] CMD: drop db36.coll36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.802-0400 m31201| 2015-07-09T14:03:26.802-0400 I COMMAND [repl writer worker 14] CMD: drop db36.coll36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.854-0400 m31100| 2015-07-09T14:03:26.854-0400 I SHARDING [conn34] remotely refreshing metadata for db36.coll36 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eb76dca4787b9985d1cf7, current metadata version is 1|3||559eb76dca4787b9985d1cf7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.855-0400 m31100| 2015-07-09T14:03:26.855-0400 W SHARDING [conn34] no chunks found when reloading db36.coll36, previous version was 0|0||559eb76dca4787b9985d1cf7, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.856-0400 m31100| 2015-07-09T14:03:26.855-0400 I SHARDING [conn34] dropping metadata for db36.coll36 at shard version 1|3||559eb76dca4787b9985d1cf7, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.857-0400 m30999| 2015-07-09T14:03:26.857-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:26.857-0400-559eb76eca4787b9985d1cfb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465006857), what: "dropCollection", ns: "db36.coll36", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.912-0400 m30999| 2015-07-09T14:03:26.911-0400 I SHARDING [conn1] distributed lock 'db36.coll36/bs-osx108-8:30999:1436464534:16807' unlocked. 
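The DROP sequence above is the sharded drop protocol end to end: conn1's mongos logs a dropCollection.start changelog event, acquires the distributed lock for db36.coll36, sends drop to the primary of each shard that owns chunks (m31100 and m31200), the secondaries replay it from the oplog (m31101/m31102 and m31201/m31202), and the donor shard then refreshes its metadata, finds no chunks ("this is a drop"), and discards its cached shard version before the lock is released. From the client this whole dance is a single call; a minimal sketch of the FSM runner's teardown, assuming a shell connected to a mongos:

    // Sketch of the per-workload teardown whose effects are logged above.
    var testDB = db.getSiblingDB("db36");
    assert(testDB.coll36.drop());                 // surfaces as "CMD: drop db36.coll36" on every member
    assert.commandWorked(testDB.dropDatabase());  // the "DROP DATABASE: db36" records that follow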
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.967-0400 m30999| 2015-07-09T14:03:26.967-0400 I COMMAND [conn1] DROP DATABASE: db36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.967-0400 m30999| 2015-07-09T14:03:26.967-0400 I SHARDING [conn1] DBConfig::dropDatabase: db36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:26.968-0400 m30999| 2015-07-09T14:03:26.967-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:26.967-0400-559eb76eca4787b9985d1cfc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465006967), what: "dropDatabase.start", ns: "db36", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.073-0400 m30999| 2015-07-09T14:03:27.073-0400 I SHARDING [conn1] DBConfig::dropDatabase: db36 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.074-0400 m31100| 2015-07-09T14:03:27.074-0400 I COMMAND [conn28] dropDatabase db36 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.074-0400 m31100| 2015-07-09T14:03:27.074-0400 I COMMAND [conn28] dropDatabase db36 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.075-0400 m30999| 2015-07-09T14:03:27.075-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.075-0400-559eb76fca4787b9985d1cfd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465007075), what: "dropDatabase", ns: "db36", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.076-0400 m31102| 2015-07-09T14:03:27.075-0400 I COMMAND [repl writer worker 5] dropDatabase db36 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.076-0400 m31102| 2015-07-09T14:03:27.075-0400 I COMMAND [repl writer worker 5] dropDatabase db36 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.076-0400 m31101| 2015-07-09T14:03:27.075-0400 I COMMAND [repl writer worker 1] dropDatabase db36 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.076-0400 m31101| 2015-07-09T14:03:27.075-0400 I COMMAND [repl writer worker 1] dropDatabase db36 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.160-0400 m31100| 2015-07-09T14:03:27.160-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.164-0400 m31102| 2015-07-09T14:03:27.164-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.164-0400 m31101| 2015-07-09T14:03:27.164-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.197-0400 m31200| 2015-07-09T14:03:27.197-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.198-0400 m31202| 2015-07-09T14:03:27.198-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.199-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.199-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.199-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.199-0400 jstests/concurrency/fsm_workloads/update_array_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.199-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.199-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.200-0400 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:27.201-0400 m31201| 2015-07-09T14:03:27.201-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.207-0400 m30999| 2015-07-09T14:03:27.207-0400 I SHARDING [conn1] distributed lock 'db37/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76fca4787b9985d1cfe [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.211-0400 m30999| 2015-07-09T14:03:27.211-0400 I SHARDING [conn1] Placing [db37] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.211-0400 m30999| 2015-07-09T14:03:27.211-0400 I SHARDING [conn1] Enabling sharding for database [db37] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.265-0400 m30999| 2015-07-09T14:03:27.264-0400 I SHARDING [conn1] distributed lock 'db37/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.292-0400 m31100| 2015-07-09T14:03:27.292-0400 I INDEX [conn145] build index on: db37.coll37 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.292-0400 m31100| 2015-07-09T14:03:27.292-0400 I INDEX [conn145] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.304-0400 m31100| 2015-07-09T14:03:27.304-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.305-0400 m30999| 2015-07-09T14:03:27.305-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db37.coll37", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.308-0400 m30999| 2015-07-09T14:03:27.308-0400 I SHARDING [conn1] distributed lock 'db37.coll37/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb76fca4787b9985d1cff [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.309-0400 m31102| 2015-07-09T14:03:27.309-0400 I INDEX [repl writer worker 9] build index on: db37.coll37 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.310-0400 m31102| 2015-07-09T14:03:27.309-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.310-0400 m30999| 2015-07-09T14:03:27.309-0400 I SHARDING [conn1] enable sharding on: db37.coll37 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.310-0400 m30999| 2015-07-09T14:03:27.309-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.309-0400-559eb76fca4787b9985d1d00", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465007309), what: "shardCollection.start", ns: "db37.coll37", details: { shardKey: { _id: "hashed" }, collection: "db37.coll37", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.310-0400 m31101| 2015-07-09T14:03:27.310-0400 I INDEX [repl writer worker 6] build index on: db37.coll37 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.310-0400 m31101| 2015-07-09T14:03:27.310-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.312-0400 m31102| 2015-07-09T14:03:27.311-0400 I INDEX [repl writer 
worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.317-0400 m31101| 2015-07-09T14:03:27.317-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.362-0400 m30999| 2015-07-09T14:03:27.362-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db37.coll37 using new epoch 559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.471-0400 m30999| 2015-07-09T14:03:27.470-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db37.coll37: 0ms sequenceNumber: 163 version: 1|1||559eb76fca4787b9985d1d01 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.526-0400 m30999| 2015-07-09T14:03:27.526-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db37.coll37: 0ms sequenceNumber: 164 version: 1|1||559eb76fca4787b9985d1d01 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.528-0400 m31100| 2015-07-09T14:03:27.528-0400 I SHARDING [conn52] remotely refreshing metadata for db37.coll37 with requested shard version 1|1||559eb76fca4787b9985d1d01, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.531-0400 m31100| 2015-07-09T14:03:27.530-0400 I SHARDING [conn52] collection db37.coll37 was previously unsharded, new metadata loaded with shard version 1|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.531-0400 m31100| 2015-07-09T14:03:27.531-0400 I SHARDING [conn52] collection version was loaded at version 1|1||559eb76fca4787b9985d1d01, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.532-0400 m30999| 2015-07-09T14:03:27.531-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.531-0400-559eb76fca4787b9985d1d02", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465007531), what: "shardCollection", ns: "db37.coll37", details: { version: "1|1||559eb76fca4787b9985d1d01" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.586-0400 m30999| 2015-07-09T14:03:27.585-0400 I SHARDING [conn1] distributed lock 'db37.coll37/bs-osx108-8:30999:1436464534:16807' unlocked. 
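Setup for the next workload follows the standard pattern: mongos takes the db37 distributed lock, places the database on test-rs0, marks it sharded in the config database, builds the _id_hashed index on the primary shard (replicated to m31101/m31102 above), and shards db37.coll37 on a hashed _id key. That is why shardCollection.start reports numChunks: 2: an empty collection with a hashed key is pre-split at hash value 0, producing two initial chunks, one of which is migrated to test-rs1 in the records that follow. The client-side equivalent, sketched for a shell connected to a mongos (the log spells the command lowercase, shardcollection; both forms are accepted):

    // Sketch of the sharding setup logged above.
    assert.commandWorked(db.adminCommand({ enableSharding: "db37" }));
    assert.commandWorked(db.adminCommand({
        shardCollection: "db37.coll37",
        key: { _id: "hashed" }     // hashed key: two initial chunks, pre-split at 0
    }));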
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.587-0400 m30999| 2015-07-09T14:03:27.586-0400 I SHARDING [conn1] moving chunk ns: db37.coll37 moving ( ns: db37.coll37, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.587-0400 m31100| 2015-07-09T14:03:27.587-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.588-0400 m31100| 2015-07-09T14:03:27.588-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb76fca4787b9985d1d01') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.591-0400 m31100| 2015-07-09T14:03:27.591-0400 I SHARDING [conn34] distributed lock 'db37.coll37/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76f792e00bb672749a8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.592-0400 m31100| 2015-07-09T14:03:27.591-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.591-0400-559eb76f792e00bb672749a9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465007591), what: "moveChunk.start", ns: "db37.coll37", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.645-0400 m31100| 2015-07-09T14:03:27.644-0400 I SHARDING [conn34] remotely refreshing metadata for db37.coll37 based on current shard version 1|1||559eb76fca4787b9985d1d01, current metadata version is 1|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.646-0400 m31100| 2015-07-09T14:03:27.646-0400 I SHARDING [conn34] metadata of collection db37.coll37 already up to date (shard version : 1|1||559eb76fca4787b9985d1d01, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.646-0400 m31100| 2015-07-09T14:03:27.646-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.646-0400 m31100| 2015-07-09T14:03:27.646-0400 I SHARDING [conn34] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.647-0400 m31200| 2015-07-09T14:03:27.647-0400 I SHARDING [conn16] remotely refreshing metadata for db37.coll37, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.649-0400 m31200| 2015-07-09T14:03:27.648-0400 I SHARDING [conn16] collection db37.coll37 was previously unsharded, new metadata loaded with shard version 0|0||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.649-0400 m31200| 2015-07-09T14:03:27.648-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb76fca4787b9985d1d01, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.649-0400 m31200| 2015-07-09T14:03:27.649-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db37.coll37 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.651-0400 m31100| 2015-07-09T14:03:27.650-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.654-0400 m31100| 2015-07-09T14:03:27.654-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.660-0400 m31100| 2015-07-09T14:03:27.659-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.660-0400 m31200| 2015-07-09T14:03:27.660-0400 I INDEX [migrateThread] build index on: db37.coll37 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.661-0400 m31200| 2015-07-09T14:03:27.660-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.670-0400 m31100| 2015-07-09T14:03:27.669-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.671-0400 m31200| 2015-07-09T14:03:27.670-0400 I INDEX [migrateThread] build index on: db37.coll37 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.671-0400 m31200| 2015-07-09T14:03:27.670-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.682-0400 m31200| 2015-07-09T14:03:27.682-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.683-0400 m31200| 2015-07-09T14:03:27.682-0400 I SHARDING [migrateThread] Deleter starting delete for: db37.coll37 from { _id: 0 } -> { _id: MaxKey }, with opId: 68069 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.683-0400 m31200| 2015-07-09T14:03:27.683-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db37.coll37 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.686-0400 m31100| 2015-07-09T14:03:27.686-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.690-0400 m31202| 2015-07-09T14:03:27.689-0400 I INDEX [repl writer worker 4] build index on: db37.coll37 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.691-0400 m31202| 2015-07-09T14:03:27.690-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.698-0400 m31202| 2015-07-09T14:03:27.698-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.699-0400 m31201| 2015-07-09T14:03:27.698-0400 I INDEX [repl writer worker 4] build index on: db37.coll37 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.699-0400 m31201| 2015-07-09T14:03:27.698-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.700-0400 m31200| 2015-07-09T14:03:27.699-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.700-0400 m31200| 2015-07-09T14:03:27.699-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db37.coll37' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.702-0400 m31201| 2015-07-09T14:03:27.702-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.720-0400 m31100| 2015-07-09T14:03:27.720-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.720-0400 m31100| 2015-07-09T14:03:27.720-0400 I SHARDING [conn34] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.721-0400 m31100| 2015-07-09T14:03:27.721-0400 I SHARDING [conn34] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.721-0400 m31100| 2015-07-09T14:03:27.721-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.724-0400 m31200| 2015-07-09T14:03:27.723-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db37.coll37' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.724-0400 m31200| 2015-07-09T14:03:27.723-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.723-0400-559eb76fd5a107a5b9c0db31", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465007723), what: "moveChunk.to", ns: "db37.coll37", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 33, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.777-0400 m31100| 2015-07-09T14:03:27.776-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.777-0400 m31100| 2015-07-09T14:03:27.777-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559eb76fca4787b9985d1d01 through { _id: MinKey } -> { _id: 0 } for collection 'db37.coll37' [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.778-0400 m31100| 2015-07-09T14:03:27.778-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.778-0400-559eb76f792e00bb672749aa", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465007778), what: "moveChunk.commit", ns: "db37.coll37", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.831-0400 m31100| 2015-07-09T14:03:27.831-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.832-0400 m31100| 2015-07-09T14:03:27.831-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.832-0400 m31100| 2015-07-09T14:03:27.831-0400 I SHARDING [conn34] Deleter starting delete for: db37.coll37 from { _id: 0 } -> { _id: MaxKey }, with opId: 67025 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:27.832-0400 m31100| 2015-07-09T14:03:27.831-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db37.coll37 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.832-0400 m31100| 2015-07-09T14:03:27.831-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.833-0400 m31100| 2015-07-09T14:03:27.832-0400 I SHARDING [conn34] distributed lock 'db37.coll37/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.833-0400 m31100| 2015-07-09T14:03:27.832-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.832-0400-559eb76f792e00bb672749ab", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465007832), what: "moveChunk.from", ns: "db37.coll37", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 111, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.885-0400 m31100| 2015-07-09T14:03:27.885-0400 I COMMAND [conn34] command db37.coll37 command: moveChunk { moveChunk: "db37.coll37", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb76fca4787b9985d1d01') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 297ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.887-0400 m30999| 2015-07-09T14:03:27.886-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db37.coll37: 0ms sequenceNumber: 165 version: 2|1||559eb76fca4787b9985d1d01 based on: 1|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.888-0400 m31100| 2015-07-09T14:03:27.888-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db37.coll37", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76fca4787b9985d1d01') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.893-0400 m31100| 2015-07-09T14:03:27.892-0400 I SHARDING [conn34] distributed lock 'db37.coll37/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb76f792e00bb672749ac [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.893-0400 m31100| 2015-07-09T14:03:27.892-0400 I SHARDING [conn34] remotely refreshing metadata for db37.coll37 based on current shard version 2|0||559eb76fca4787b9985d1d01, current metadata version is 2|0||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.894-0400 m31100| 2015-07-09T14:03:27.894-0400 I SHARDING [conn34] updating metadata for db37.coll37 from shard version 2|0||559eb76fca4787b9985d1d01 to shard version 2|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.894-0400 m31100| 2015-07-09T14:03:27.894-0400 I 
SHARDING [conn34] collection version was loaded at version 2|1||559eb76fca4787b9985d1d01, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.895-0400 m31100| 2015-07-09T14:03:27.894-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.896-0400 m31100| 2015-07-09T14:03:27.895-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.895-0400-559eb76f792e00bb672749ad", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465007895), what: "split", ns: "db37.coll37", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb76fca4787b9985d1d01') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb76fca4787b9985d1d01') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.950-0400 m31100| 2015-07-09T14:03:27.949-0400 I SHARDING [conn34] distributed lock 'db37.coll37/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.952-0400 m30999| 2015-07-09T14:03:27.951-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db37.coll37: 0ms sequenceNumber: 166 version: 2|3||559eb76fca4787b9985d1d01 based on: 2|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.952-0400 m31200| 2015-07-09T14:03:27.952-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db37.coll37", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb76fca4787b9985d1d01') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.955-0400 m31200| 2015-07-09T14:03:27.955-0400 I SHARDING [conn18] distributed lock 'db37.coll37/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb76fd5a107a5b9c0db32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.956-0400 m31200| 2015-07-09T14:03:27.955-0400 I SHARDING [conn18] remotely refreshing metadata for db37.coll37 based on current shard version 0|0||559eb76fca4787b9985d1d01, current metadata version is 1|1||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.957-0400 m31200| 2015-07-09T14:03:27.956-0400 I SHARDING [conn18] updating metadata for db37.coll37 from shard version 0|0||559eb76fca4787b9985d1d01 to shard version 2|0||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.957-0400 m31200| 2015-07-09T14:03:27.957-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb76fca4787b9985d1d01, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.957-0400 m31200| 2015-07-09T14:03:27.957-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:27.959-0400 m31200| 2015-07-09T14:03:27.958-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:27.958-0400-559eb76fd5a107a5b9c0db33", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465007958), what: "split", ns: "db37.coll37", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb76fca4787b9985d1d01') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb76fca4787b9985d1d01') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.013-0400 m31200| 2015-07-09T14:03:28.012-0400 I SHARDING [conn18] distributed lock 'db37.coll37/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.014-0400 m30999| 2015-07-09T14:03:28.014-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db37.coll37: 0ms sequenceNumber: 167 version: 2|5||559eb76fca4787b9985d1d01 based on: 2|3||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.017-0400 m30999| 2015-07-09T14:03:28.016-0400 I SHARDING [conn1] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.017-0400 m30999| 2015-07-09T14:03:28.016-0400 I SHARDING [conn1] retrying command: { listIndexes: "coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.017-0400 m31100| 2015-07-09T14:03:28.016-0400 I NETWORK [conn52] end connection 127.0.0.1:62686 (93 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.030-0400 m31100| 2015-07-09T14:03:28.029-0400 I INDEX [conn60] build index on: db37.coll37 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.030-0400 m31100| 2015-07-09T14:03:28.029-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.030-0400 m31200| 2015-07-09T14:03:28.029-0400 I INDEX [conn41] build index on: db37.coll37 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.030-0400 m31200| 2015-07-09T14:03:28.029-0400 I INDEX [conn41] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.034-0400 m31200| 2015-07-09T14:03:28.034-0400 I INDEX [conn41] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.035-0400 m31100| 2015-07-09T14:03:28.035-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.038-0400 m31202| 2015-07-09T14:03:28.038-0400 I INDEX [repl writer worker 6] build index on: db37.coll37 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.039-0400 m31202| 2015-07-09T14:03:28.038-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.044-0400 m31201| 2015-07-09T14:03:28.043-0400 I INDEX [repl writer worker 6] build index on: db37.coll37 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.044-0400 m31201| 2015-07-09T14:03:28.043-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.047-0400 m31202| 2015-07-09T14:03:28.047-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.049-0400 m31102| 2015-07-09T14:03:28.049-0400 I INDEX [repl writer worker 0] build index on: db37.coll37 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.050-0400 m31102| 2015-07-09T14:03:28.049-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.054-0400 m31201| 2015-07-09T14:03:28.054-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.057-0400 m31101| 2015-07-09T14:03:28.057-0400 I INDEX [repl writer worker 7] build index on: db37.coll37 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db37.coll37" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.057-0400 m31101| 2015-07-09T14:03:28.057-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.058-0400 m31102| 2015-07-09T14:03:28.057-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.064-0400 m31101| 2015-07-09T14:03:28.064-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.068-0400 m31200| 2015-07-09T14:03:28.067-0400 I COMMAND [conn18] CMD: dropIndexes db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.068-0400 m31100| 2015-07-09T14:03:28.067-0400 I COMMAND [conn34] CMD: dropIndexes db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.069-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.069-0400 m31102| 2015-07-09T14:03:28.069-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.080-0400 m31101| 2015-07-09T14:03:28.070-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.080-0400 m31202| 2015-07-09T14:03:28.071-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.080-0400 m31201| 2015-07-09T14:03:28.071-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.114-0400 m30999| 2015-07-09T14:03:28.114-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63420 #237 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.127-0400 m30999| 2015-07-09T14:03:28.127-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63421 #238 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.127-0400 m30998| 2015-07-09T14:03:28.127-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63422 #237 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.128-0400 m30999| 2015-07-09T14:03:28.127-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63423 #239 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.134-0400 m30998| 2015-07-09T14:03:28.134-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63424 #238 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.148-0400 setting random seed: 9728606021963 
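The db37.coll37 records above trace a full manual chunk migration: mongos sends moveChunk to the donor (test-rs0); the recipient's migrateThread loads metadata, builds the _id and _id_hashed indexes, runs the range deleter over any leftovers, clones documents (zero here), and waits for replication; the donor then enters the critical section, bumps the shard version to 2|0, commits on the config servers (moveChunk.commit), and finally deletes the migrated range inline because waitForDelete is true. Each shard then splits its remaining chunk at the hashed midpoints (±4611686018427387902), leaving two chunks per shard. A sketch of that migration issued by hand, assuming a shell connected to a mongos; for a hashed shard key the chunk is addressed by bounds rather than by a key that hashes into it:

    // Sketch of the migration logged above.
    assert.commandWorked(db.adminCommand({
        moveChunk: "db37.coll37",
        bounds: [ { _id: 0 }, { _id: MaxKey } ],  // the chunk named in the moveChunk.start event
        to: "test-rs1",
        _waitForDelete: true                      // mirrors waitForDelete: true in the donor's request
    }));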
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.148-0400 setting random seed: 9756594127975 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.148-0400 setting random seed: 9077585237100 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.148-0400 setting random seed: 6975150993093 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.149-0400 setting random seed: 8681487268768 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.151-0400 m30998| 2015-07-09T14:03:28.151-0400 I SHARDING [conn238] ChunkManager: time to load chunks for db37.coll37: 0ms sequenceNumber: 45 version: 2|5||559eb76fca4787b9985d1d01 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.196-0400 m30999| 2015-07-09T14:03:28.195-0400 I NETWORK [conn239] end connection 127.0.0.1:63423 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.216-0400 m30998| 2015-07-09T14:03:28.215-0400 I NETWORK [conn237] end connection 127.0.0.1:63422 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.222-0400 m30998| 2015-07-09T14:03:28.221-0400 I NETWORK [conn238] end connection 127.0.0.1:63424 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.227-0400 m30999| 2015-07-09T14:03:28.227-0400 I NETWORK [conn237] end connection 127.0.0.1:63420 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.239-0400 m30999| 2015-07-09T14:03:28.236-0400 I NETWORK [conn238] end connection 127.0.0.1:63421 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.250-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.250-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.251-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.251-0400 jstests/concurrency/fsm_workloads/update_array_noindex.js: Workload completed in 181 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.251-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.251-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.251-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.251-0400 m30999| 2015-07-09T14:03:28.251-0400 I COMMAND [conn1] DROP: db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.251-0400 m30999| 2015-07-09T14:03:28.251-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:28.251-0400-559eb770ca4787b9985d1d03", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465008251), what: "dropCollection.start", ns: "db37.coll37", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.308-0400 m30999| 2015-07-09T14:03:28.308-0400 I SHARDING [conn1] distributed lock 'db37.coll37/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb770ca4787b9985d1d04 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.309-0400 m31100| 2015-07-09T14:03:28.309-0400 I COMMAND [conn34] CMD: drop db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.312-0400 m31200| 2015-07-09T14:03:28.311-0400 I COMMAND [conn18] CMD: drop db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.313-0400 m31101| 2015-07-09T14:03:28.312-0400 I COMMAND [repl writer worker 3] CMD: drop db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.313-0400 m31102| 2015-07-09T14:03:28.313-0400 I COMMAND [repl writer worker 0] CMD: drop db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.315-0400 
m31201| 2015-07-09T14:03:28.315-0400 I COMMAND [repl writer worker 7] CMD: drop db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.316-0400 m31202| 2015-07-09T14:03:28.315-0400 I COMMAND [repl writer worker 3] CMD: drop db37.coll37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.367-0400 m31100| 2015-07-09T14:03:28.367-0400 I SHARDING [conn34] remotely refreshing metadata for db37.coll37 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb76fca4787b9985d1d01, current metadata version is 2|3||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.369-0400 m31100| 2015-07-09T14:03:28.368-0400 W SHARDING [conn34] no chunks found when reloading db37.coll37, previous version was 0|0||559eb76fca4787b9985d1d01, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.369-0400 m31100| 2015-07-09T14:03:28.369-0400 I SHARDING [conn34] dropping metadata for db37.coll37 at shard version 2|3||559eb76fca4787b9985d1d01, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.370-0400 m31200| 2015-07-09T14:03:28.370-0400 I SHARDING [conn18] remotely refreshing metadata for db37.coll37 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb76fca4787b9985d1d01, current metadata version is 2|5||559eb76fca4787b9985d1d01 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.372-0400 m31200| 2015-07-09T14:03:28.371-0400 W SHARDING [conn18] no chunks found when reloading db37.coll37, previous version was 0|0||559eb76fca4787b9985d1d01, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.372-0400 m31200| 2015-07-09T14:03:28.371-0400 I SHARDING [conn18] dropping metadata for db37.coll37 at shard version 2|5||559eb76fca4787b9985d1d01, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.373-0400 m30999| 2015-07-09T14:03:28.372-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:28.372-0400-559eb770ca4787b9985d1d05", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465008372), what: "dropCollection", ns: "db37.coll37", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.428-0400 m30999| 2015-07-09T14:03:28.427-0400 I SHARDING [conn1] distributed lock 'db37.coll37/bs-osx108-8:30999:1436464534:16807' unlocked. 
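Every "about to log metadata event" record above corresponds to a document written to the config servers' changelog collection, and the shard-version refreshes ("no chunks found when reloading db37.coll37 ... this is a drop") come from re-reading config.chunks. Both are plain collections readable from any mongos, which is useful when reconstructing what a run like this did; a minimal sketch, assuming a shell connected to a mongos:

    // Sketch: inspect the cluster metadata behind the log records above.
    var configDB = db.getSiblingDB("config");
    // split, moveChunk.*, dropCollection, dropDatabase events for the namespace:
    configDB.changelog.find({ ns: "db37.coll37" }).sort({ time: 1 }).forEach(printjson);
    // After the drop above, the namespace owns no chunks:
    assert.eq(0, configDB.chunks.count({ ns: "db37.coll37" }));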
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.483-0400 m30999| 2015-07-09T14:03:28.483-0400 I COMMAND [conn1] DROP DATABASE: db37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.484-0400 m30999| 2015-07-09T14:03:28.483-0400 I SHARDING [conn1] DBConfig::dropDatabase: db37 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.484-0400 m30999| 2015-07-09T14:03:28.483-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:28.483-0400-559eb770ca4787b9985d1d06", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465008483), what: "dropDatabase.start", ns: "db37", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.590-0400 m30999| 2015-07-09T14:03:28.589-0400 I SHARDING [conn1] DBConfig::dropDatabase: db37 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.590-0400 m31100| 2015-07-09T14:03:28.590-0400 I COMMAND [conn28] dropDatabase db37 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.590-0400 m31100| 2015-07-09T14:03:28.590-0400 I COMMAND [conn28] dropDatabase db37 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.591-0400 m30999| 2015-07-09T14:03:28.590-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:28.590-0400-559eb770ca4787b9985d1d07", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465008590), what: "dropDatabase", ns: "db37", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.591-0400 m31101| 2015-07-09T14:03:28.591-0400 I COMMAND [repl writer worker 6] dropDatabase db37 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.591-0400 m31102| 2015-07-09T14:03:28.591-0400 I COMMAND [repl writer worker 11] dropDatabase db37 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.592-0400 m31102| 2015-07-09T14:03:28.591-0400 I COMMAND [repl writer worker 11] dropDatabase db37 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.592-0400 m31101| 2015-07-09T14:03:28.591-0400 I COMMAND [repl writer worker 6] dropDatabase db37 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.679-0400 m31100| 2015-07-09T14:03:28.678-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.682-0400 m31102| 2015-07-09T14:03:28.682-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.683-0400 m31101| 2015-07-09T14:03:28.682-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.717-0400 m31200| 2015-07-09T14:03:28.717-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.718-0400 m31201| 2015-07-09T14:03:28.718-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.719-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.719-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.719-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.719-0400 jstests/concurrency/fsm_workloads/indexed_insert_base.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.719-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.719-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.719-0400 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:28.720-0400 m31202| 2015-07-09T14:03:28.720-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.724-0400 m30999| 2015-07-09T14:03:28.724-0400 I SHARDING [conn1] distributed lock 'db38/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb770ca4787b9985d1d08 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.728-0400 m30999| 2015-07-09T14:03:28.728-0400 I SHARDING [conn1] Placing [db38] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.728-0400 m30999| 2015-07-09T14:03:28.728-0400 I SHARDING [conn1] Enabling sharding for database [db38] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.782-0400 m30999| 2015-07-09T14:03:28.781-0400 I SHARDING [conn1] distributed lock 'db38/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.803-0400 m31100| 2015-07-09T14:03:28.802-0400 I INDEX [conn145] build index on: db38.coll38 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db38.coll38" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.803-0400 m31100| 2015-07-09T14:03:28.802-0400 I INDEX [conn145] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.811-0400 m31100| 2015-07-09T14:03:28.811-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.813-0400 m30999| 2015-07-09T14:03:28.812-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db38.coll38", key: { x: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.816-0400 m30999| 2015-07-09T14:03:28.815-0400 I SHARDING [conn1] distributed lock 'db38.coll38/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb770ca4787b9985d1d09 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.817-0400 m30999| 2015-07-09T14:03:28.816-0400 I SHARDING [conn1] enable sharding on: db38.coll38 with shard key: { x: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.818-0400 m30999| 2015-07-09T14:03:28.816-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:28.816-0400-559eb770ca4787b9985d1d0a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465008816), what: "shardCollection.start", ns: "db38.coll38", details: { shardKey: { x: 1.0 }, collection: "db38.coll38", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.823-0400 m31102| 2015-07-09T14:03:28.822-0400 I INDEX [repl writer worker 12] build index on: db38.coll38 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db38.coll38" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.823-0400 m31102| 2015-07-09T14:03:28.822-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.826-0400 m31101| 2015-07-09T14:03:28.825-0400 I INDEX [repl writer worker 15] build index on: db38.coll38 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db38.coll38" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.826-0400 m31101| 2015-07-09T14:03:28.825-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.832-0400 m31102| 2015-07-09T14:03:28.832-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.835-0400 m31101| 2015-07-09T14:03:28.835-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.870-0400 m30999| 2015-07-09T14:03:28.870-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db38.coll38 using new epoch 559eb770ca4787b9985d1d0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.924-0400 m30999| 2015-07-09T14:03:28.923-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db38.coll38: 0ms sequenceNumber: 168 version: 1|0||559eb770ca4787b9985d1d0b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.979-0400 m30999| 2015-07-09T14:03:28.978-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db38.coll38: 0ms sequenceNumber: 169 version: 1|0||559eb770ca4787b9985d1d0b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.981-0400 m31100| 2015-07-09T14:03:28.980-0400 I SHARDING [conn55] remotely refreshing metadata for db38.coll38 with requested shard version 1|0||559eb770ca4787b9985d1d0b, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.982-0400 m31100| 2015-07-09T14:03:28.982-0400 I SHARDING [conn55] collection db38.coll38 was previously unsharded, new metadata loaded with shard version 1|0||559eb770ca4787b9985d1d0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.982-0400 m31100| 2015-07-09T14:03:28.982-0400 I SHARDING [conn55] collection version was loaded at version 1|0||559eb770ca4787b9985d1d0b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:28.983-0400 m30999| 2015-07-09T14:03:28.982-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:28.982-0400-559eb770ca4787b9985d1d0c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465008982), what: "shardCollection", ns: "db38.coll38", details: { version: "1|0||559eb770ca4787b9985d1d0b" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.037-0400 m30999| 2015-07-09T14:03:29.037-0400 I SHARDING [conn1] distributed lock 'db38.coll38/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.053-0400 m31200| 2015-07-09T14:03:29.052-0400 I INDEX [conn39] build index on: db38.coll38 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db38.coll38" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.053-0400 m31200| 2015-07-09T14:03:29.052-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.059-0400 m31200| 2015-07-09T14:03:29.059-0400 I INDEX [conn39] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.060-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.179-0400 m31202| 2015-07-09T14:03:29.170-0400 I INDEX [repl writer worker 1] build index on: db38.coll38 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db38.coll38" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.179-0400 m31202| 2015-07-09T14:03:29.170-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.213-0400 m31201| 2015-07-09T14:03:29.210-0400 I INDEX [repl writer worker 8] build index on: db38.coll38 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db38.coll38" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.213-0400 m31201| 2015-07-09T14:03:29.210-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.222-0400 m31202| 2015-07-09T14:03:29.218-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.261-0400 m31201| 2015-07-09T14:03:29.261-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.274-0400 m30998| 2015-07-09T14:03:29.273-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63425 #239 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.274-0400 m30999| 2015-07-09T14:03:29.274-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63426 #240 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.284-0400 m30998| 2015-07-09T14:03:29.284-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63427 #240 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.285-0400 m30999| 2015-07-09T14:03:29.285-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63428 #241 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.296-0400 m30999| 2015-07-09T14:03:29.296-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63429 #242 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.316-0400 m30998| 2015-07-09T14:03:29.315-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63430 #241 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.316-0400 m30999| 2015-07-09T14:03:29.316-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63435 #243 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.325-0400 m30999| 2015-07-09T14:03:29.325-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63436 #244 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.326-0400 m30998| 2015-07-09T14:03:29.325-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63431 #242 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.330-0400 m30998| 2015-07-09T14:03:29.330-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63432 #243 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.330-0400 m30998| 2015-07-09T14:03:29.330-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63433 #244 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.333-0400 m30999| 
2015-07-09T14:03:29.332-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63437 #245 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.336-0400 m30998| 2015-07-09T14:03:29.335-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63434 #245 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.337-0400 m30999| 2015-07-09T14:03:29.337-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63439 #246 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.337-0400 m30998| 2015-07-09T14:03:29.337-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63438 #246 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.338-0400 m30999| 2015-07-09T14:03:29.338-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63440 #247 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.340-0400 m30999| 2015-07-09T14:03:29.340-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63441 #248 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.343-0400 m30999| 2015-07-09T14:03:29.343-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63442 #249 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.343-0400 m30998| 2015-07-09T14:03:29.343-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63443 #247 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.344-0400 m30998| 2015-07-09T14:03:29.344-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63444 #248 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.351-0400 setting random seed: 3400216535665 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.351-0400 setting random seed: 4217763878405 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.354-0400 setting random seed: 4189575747586 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.354-0400 setting random seed: 4504277990199 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.354-0400 setting random seed: 2330480022355 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.354-0400 setting random seed: 3055026261135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.355-0400 setting random seed: 3109319633804 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.355-0400 setting random seed: 5826394818723 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.357-0400 setting random seed: 4911058591678 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.357-0400 setting random seed: 9184030792675 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.357-0400 m30998| 2015-07-09T14:03:29.356-0400 I SHARDING [conn240] ChunkManager: time to load chunks for db38.coll38: 0ms sequenceNumber: 46 version: 1|0||559eb770ca4787b9985d1d0b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.358-0400 setting random seed: 2381994975730 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.358-0400 setting random seed: 161788780242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.360-0400 setting random seed: 4199622478336 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.362-0400 setting random seed: 7439341531135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.366-0400 setting random seed: 1362795517779 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:29.367-0400 setting random seed: 6192842819727 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.371-0400 setting random seed: 8377753971144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.371-0400 setting random seed: 3013172009959 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.371-0400 setting random seed: 6005604378879 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.379-0400 setting random seed: 7957537360489 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.407-0400 m31100| 2015-07-09T14:03:29.407-0400 I SHARDING [conn39] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.415-0400 m31100| 2015-07-09T14:03:29.413-0400 I SHARDING [conn34] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.423-0400 m31100| 2015-07-09T14:03:29.422-0400 I SHARDING [conn39] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.423-0400 m31100| 2015-07-09T14:03:29.423-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.425-0400 m31100| 2015-07-09T14:03:29.425-0400 I SHARDING [conn39] distributed lock 'db38.coll38/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb771792e00bb672749af [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.426-0400 m31100| 2015-07-09T14:03:29.425-0400 I SHARDING [conn39] remotely refreshing metadata for db38.coll38 based on current shard version 1|0||559eb770ca4787b9985d1d0b, current metadata version is 1|0||559eb770ca4787b9985d1d0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.426-0400 m31100| 2015-07-09T14:03:29.425-0400 I SHARDING [conn32] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.434-0400 m31100| 2015-07-09T14:03:29.433-0400 I SHARDING [conn35] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.434-0400 m31100| 2015-07-09T14:03:29.433-0400 I SHARDING [conn39] metadata of collection db38.coll38 already up to date (shard version : 1|0||559eb770ca4787b9985d1d0b, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.434-0400 m31100| 2015-07-09T14:03:29.433-0400 I SHARDING [conn37] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.435-0400 m31100| 2015-07-09T14:03:29.433-0400 I SHARDING [conn36] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.435-0400 m31100| 2015-07-09T14:03:29.433-0400 I SHARDING [conn34] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.435-0400 m31100| 2015-07-09T14:03:29.433-0400 I SHARDING [conn132] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.435-0400 m31100| 
2015-07-09T14:03:29.434-0400 I SHARDING [conn15] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.435-0400 m31100| 2015-07-09T14:03:29.434-0400 I SHARDING [conn40] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.435-0400 m31100| 2015-07-09T14:03:29.434-0400 I SHARDING [conn39] splitChunk accepted at version 1|0||559eb770ca4787b9985d1d0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.435-0400 m31100| 2015-07-09T14:03:29.434-0400 I SHARDING [conn38] request split points lookup for chunk db38.coll38 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.438-0400 m31100| 2015-07-09T14:03:29.436-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.438-0400 m31100| 2015-07-09T14:03:29.437-0400 W SHARDING [conn35] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.439-0400 m31100| 2015-07-09T14:03:29.438-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.440-0400 m30998| 2015-07-09T14:03:29.437-0400 W SHARDING [conn246] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.445-0400 m31100| 2015-07-09T14:03:29.439-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.446-0400 m31100| 2015-07-09T14:03:29.440-0400 W SHARDING [conn32] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. 
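The splitChunk requests above come from mongos autosplit: once inserts push the single { MinKey } -->> { MaxKey } chunk of db38.coll38 past the split threshold, every mongos that routed writes tries to split it, and each attempt must first take the collection's distributed lock. The same split can be driven by hand from a mongos; a minimal shell sketch using the split keys from the accepted request (x: 1 and x: 15), assuming a connection to m30999 or m30998:

    // Manually split db38.coll38 at the same keys the autosplit chose.
    // sh.splitAt() sends a "split" command through mongos, which drives
    // a splitChunk on the owning shard like the requests logged above.
    sh.splitAt("db38.coll38", { x: 1 });
    sh.splitAt("db38.coll38", { x: 15 });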
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.447-0400 m31100| 2015-07-09T14:03:29.440-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.447-0400 m31100| 2015-07-09T14:03:29.440-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.448-0400 m30998| 2015-07-09T14:03:29.440-0400 W SHARDING [conn247] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.448-0400 m31100| 2015-07-09T14:03:29.441-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.448-0400 m31100| 2015-07-09T14:03:29.441-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.449-0400 m31100| 2015-07-09T14:03:29.441-0400 W SHARDING [conn37] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. 
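Note that the racing requests carry different splitKeys ([1, 15], [1, 14], [1, 12, 19]): each worker computed its own candidate split points from the shard before asking for the split, which is what the "request split points lookup" lines on m31100 record. A sketch of issuing that lookup directly against the shard primary (the splitVector command; the maxChunkSizeBytes value here is illustrative, taken from the splitThreshold of 921 logged further down):

    // Ask the shard primary (m31100) for candidate split points for the
    // full-range chunk -- the lookup behind "request split points lookup"
    // above. This runs on the shard, not through mongos.
    db.getSiblingDB("admin").runCommand({
        splitVector: "db38.coll38",
        keyPattern: { x: 1 },
        min: { x: MinKey },
        max: { x: MaxKey },
        maxChunkSizeBytes: 921
    });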
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.450-0400 m30999| 2015-07-09T14:03:29.441-0400 W SHARDING [conn247] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.450-0400 m31100| 2015-07-09T14:03:29.441-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.450-0400 m30998| 2015-07-09T14:03:29.442-0400 I SHARDING [conn246] ChunkManager: time to load chunks for db38.coll38: 2ms sequenceNumber: 47 version: 1|3||559eb770ca4787b9985d1d0b based on: 1|0||559eb770ca4787b9985d1d0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.450-0400 m31100| 2015-07-09T14:03:29.442-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.451-0400 m31100| 2015-07-09T14:03:29.442-0400 W SHARDING [conn34] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.451-0400 m31100| 2015-07-09T14:03:29.443-0400 W SHARDING [conn38] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. 
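At this point the config servers already hold the committed three-way split: the ChunkManager reload above reports version 1|3, i.e. major version 1 with three minor-version bumps, one per resulting chunk. The layout can be read straight out of the config database; a sketch, run against a mongos:

    // List the chunks of db38.coll38 as recorded on the config servers.
    // After the multi-split this shows three ranges on test-rs0 with
    // lastmod minor versions 1, 2, and 3.
    db.getSiblingDB("config").chunks
        .find({ ns: "db38.coll38" },
              { _id: 0, min: 1, max: 1, shard: 1, lastmod: 1 })
        .sort({ min: 1 })
        .forEach(printjson);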
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.451-0400 m30999| 2015-07-09T14:03:29.442-0400 W SHARDING [conn244] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.451-0400 m30999| 2015-07-09T14:03:29.443-0400 W SHARDING [conn243] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.452-0400 m31100| 2015-07-09T14:03:29.443-0400 W SHARDING [conn132] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.452-0400 m31100| 2015-07-09T14:03:29.444-0400 W SHARDING [conn15] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.453-0400 m30999| 2015-07-09T14:03:29.444-0400 W SHARDING [conn249] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.453-0400 m31100| 2015-07-09T14:03:29.444-0400 W SHARDING [conn40] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. 
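All of these failures are the expected outcome of the race: conn39 holds the distributed lock for db38.coll38 and performs the split, and every other attempt fails with code 125 until the chunk boundaries change. The same failure can surface when scripting splits against a live cluster, and it is safe to treat as "another split got there first"; a sketch:

    // A concurrent autosplit can hold the collection's distributed lock;
    // the resulting code-125 failure (as logged above) means the chunk is
    // already being split, so the command can simply be skipped.
    var res = db.adminCommand({ split: "db38.coll38", middle: { x: 12 } });
    if (!res.ok && res.code === 125) {
        print("split skipped: another split holds the collection lock");
    } else {
        assert.commandWorked(res);
    }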
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.453-0400 m30998| 2015-07-09T14:03:29.444-0400 W SHARDING [conn248] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.454-0400 m30999| 2015-07-09T14:03:29.444-0400 W SHARDING [conn242] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.454-0400 m31100| 2015-07-09T14:03:29.445-0400 W SHARDING [conn36] could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db38.coll38 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.455-0400 m30998| 2015-07-09T14:03:29.446-0400 W SHARDING [conn240] splitChunk failed - cmd: { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 12.0 }, { x: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db38.coll38 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.455-0400 m30999| 2015-07-09T14:03:29.448-0400 I SHARDING [conn243] ChunkManager: time to load chunks for db38.coll38: 0ms sequenceNumber: 170 version: 1|3||559eb770ca4787b9985d1d0b based on: 1|0||559eb770ca4787b9985d1d0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.456-0400 m31100| 2015-07-09T14:03:29.449-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:29.449-0400-559eb771792e00bb672749b0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465009449), what: "multi-split", ns: "db38.coll38", details: { before: { min: { x: MinKey }, max: { x: MaxKey } }, number: 1, of: 3, chunk: { min: { x: MinKey }, max: { x: 1.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb770ca4787b9985d1d0b') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.480-0400 m31100| 2015-07-09T14:03:29.478-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63445 #151 (94 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.503-0400 m31100| 2015-07-09T14:03:29.501-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:29.501-0400-559eb771792e00bb672749b1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465009501), what: "multi-split", ns: "db38.coll38", details: { before: { min: { x: MinKey 
}, max: { x: MaxKey } }, number: 2, of: 3, chunk: { min: { x: 1.0 }, max: { x: 15.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb770ca4787b9985d1d0b') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.554-0400 m31100| 2015-07-09T14:03:29.553-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:29.553-0400-559eb771792e00bb672749b2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465009553), what: "multi-split", ns: "db38.coll38", details: { before: { min: { x: MinKey }, max: { x: MaxKey } }, number: 3, of: 3, chunk: { min: { x: 15.0 }, max: { x: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb770ca4787b9985d1d0b') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.609-0400 m31100| 2015-07-09T14:03:29.609-0400 I SHARDING [conn39] distributed lock 'db38.coll38/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.611-0400 m31100| 2015-07-09T14:03:29.610-0400 I COMMAND [conn39] command db38.coll38 command: splitChunk { splitChunk: "db38.coll38", keyPattern: { x: 1.0 }, min: { x: MinKey }, max: { x: MaxKey }, from: "test-rs0", splitKeys: [ { x: 1.0 }, { x: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb770ca4787b9985d1d0b') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 20722 } } } protocol:op_command 187ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.611-0400 m30998| 2015-07-09T14:03:29.611-0400 I SHARDING [conn239] autosplitted db38.coll38 shard: ns: db38.coll38, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { x: MinKey }, max: { x: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.757-0400 m30998| 2015-07-09T14:03:29.756-0400 I NETWORK [conn240] end connection 127.0.0.1:63427 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.811-0400 m30998| 2015-07-09T14:03:29.810-0400 I NETWORK [conn241] end connection 127.0.0.1:63430 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.823-0400 m30999| 2015-07-09T14:03:29.823-0400 I NETWORK [conn244] end connection 127.0.0.1:63436 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.843-0400 m30999| 2015-07-09T14:03:29.843-0400 I NETWORK [conn241] end connection 127.0.0.1:63428 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.858-0400 m30999| 2015-07-09T14:03:29.857-0400 I NETWORK [conn242] end connection 127.0.0.1:63429 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.865-0400 m30998| 2015-07-09T14:03:29.864-0400 I NETWORK [conn242] end connection 127.0.0.1:63431 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.871-0400 m30999| 2015-07-09T14:03:29.871-0400 I NETWORK [conn248] end connection 127.0.0.1:63441 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.876-0400 m30999| 2015-07-09T14:03:29.876-0400 I NETWORK [conn243] end connection 127.0.0.1:63435 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.880-0400 m30998| 2015-07-09T14:03:29.877-0400 I NETWORK [conn246] end connection 127.0.0.1:63438 (7 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.894-0400 m30998| 2015-07-09T14:03:29.887-0400 I NETWORK [conn248] end connection 127.0.0.1:63444 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.894-0400 m30998| 2015-07-09T14:03:29.892-0400 I NETWORK [conn247] end connection 127.0.0.1:63443 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.894-0400 m30999| 2015-07-09T14:03:29.892-0400 I NETWORK [conn247] end connection 127.0.0.1:63440 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.928-0400 m30998| 2015-07-09T14:03:29.921-0400 I NETWORK [conn243] end connection 127.0.0.1:63432 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.928-0400 m30999| 2015-07-09T14:03:29.925-0400 I NETWORK [conn240] end connection 127.0.0.1:63426 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.958-0400 m30998| 2015-07-09T14:03:29.951-0400 I NETWORK [conn245] end connection 127.0.0.1:63434 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.971-0400 m30998| 2015-07-09T14:03:29.971-0400 I NETWORK [conn239] end connection 127.0.0.1:63425 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.971-0400 m30999| 2015-07-09T14:03:29.971-0400 I NETWORK [conn246] end connection 127.0.0.1:63439 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.981-0400 m30998| 2015-07-09T14:03:29.980-0400 I NETWORK [conn244] end connection 127.0.0.1:63433 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.996-0400 m30999| 2015-07-09T14:03:29.991-0400 I NETWORK [conn249] end connection 127.0.0.1:63442 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:29.997-0400 m30999| 2015-07-09T14:03:29.995-0400 I NETWORK [conn245] end connection 127.0.0.1:63437 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.015-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 jstests/concurrency/fsm_workloads/indexed_insert_base.js: Workload completed in 955 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 m30999| 2015-07-09T14:03:30.016-0400 I COMMAND [conn1] DROP: db38.coll38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.016-0400 m30999| 2015-07-09T14:03:30.016-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:30.016-0400-559eb772ca4787b9985d1d0d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465010016), what: "dropCollection.start", ns: "db38.coll38", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.073-0400 m30999| 2015-07-09T14:03:30.073-0400 I SHARDING [conn1] distributed lock 'db38.coll38/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb772ca4787b9985d1d0e [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.074-0400 m31100| 2015-07-09T14:03:30.074-0400 I COMMAND [conn15] CMD: drop db38.coll38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.076-0400 m31200| 
2015-07-09T14:03:30.076-0400 I COMMAND [conn18] CMD: drop db38.coll38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.078-0400 m31101| 2015-07-09T14:03:30.078-0400 I COMMAND [repl writer worker 11] CMD: drop db38.coll38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.079-0400 m31202| 2015-07-09T14:03:30.078-0400 I COMMAND [repl writer worker 7] CMD: drop db38.coll38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.079-0400 m31102| 2015-07-09T14:03:30.079-0400 I COMMAND [repl writer worker 13] CMD: drop db38.coll38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.080-0400 m31201| 2015-07-09T14:03:30.080-0400 I COMMAND [repl writer worker 12] CMD: drop db38.coll38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.131-0400 m31100| 2015-07-09T14:03:30.131-0400 I SHARDING [conn15] remotely refreshing metadata for db38.coll38 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eb770ca4787b9985d1d0b, current metadata version is 1|3||559eb770ca4787b9985d1d0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.133-0400 m31100| 2015-07-09T14:03:30.133-0400 W SHARDING [conn15] no chunks found when reloading db38.coll38, previous version was 0|0||559eb770ca4787b9985d1d0b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.133-0400 m31100| 2015-07-09T14:03:30.133-0400 I SHARDING [conn15] dropping metadata for db38.coll38 at shard version 1|3||559eb770ca4787b9985d1d0b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.135-0400 m30999| 2015-07-09T14:03:30.135-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:30.135-0400-559eb772ca4787b9985d1d0f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465010135), what: "dropCollection", ns: "db38.coll38", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.188-0400 m30999| 2015-07-09T14:03:30.188-0400 I SHARDING [conn1] distributed lock 'db38.coll38/bs-osx108-8:30999:1436464534:16807' unlocked. 
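The teardown between workloads traces the full path of dropping a sharded collection: mongos logs dropCollection.start, takes the collection's distributed lock, sends the drop to each shard's primary (m31100 and m31200, replayed by their secondaries), has the shard discard its cached metadata ("no chunks found ... this is a drop"), logs dropCollection, and unlocks. From the shell, all of that is a single call against a mongos:

    // Drop the sharded collection; mongos performs the locking, the
    // per-shard drops, and the metadata cleanup traced in the log above.
    db.getSiblingDB("db38").coll38.drop();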
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.243-0400 m30999| 2015-07-09T14:03:30.243-0400 I COMMAND [conn1] DROP DATABASE: db38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.243-0400 m30999| 2015-07-09T14:03:30.243-0400 I SHARDING [conn1] DBConfig::dropDatabase: db38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.244-0400 m30999| 2015-07-09T14:03:30.243-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:30.243-0400-559eb772ca4787b9985d1d10", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465010243), what: "dropDatabase.start", ns: "db38", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.352-0400 m30999| 2015-07-09T14:03:30.352-0400 I SHARDING [conn1] DBConfig::dropDatabase: db38 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.353-0400 m31100| 2015-07-09T14:03:30.352-0400 I COMMAND [conn28] dropDatabase db38 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.353-0400 m31100| 2015-07-09T14:03:30.353-0400 I COMMAND [conn28] dropDatabase db38 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.354-0400 m30999| 2015-07-09T14:03:30.353-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:30.353-0400-559eb772ca4787b9985d1d11", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465010353), what: "dropDatabase", ns: "db38", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.354-0400 m31102| 2015-07-09T14:03:30.354-0400 I COMMAND [repl writer worker 14] dropDatabase db38 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.354-0400 m31101| 2015-07-09T14:03:30.354-0400 I COMMAND [repl writer worker 13] dropDatabase db38 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.355-0400 m31102| 2015-07-09T14:03:30.354-0400 I COMMAND [repl writer worker 14] dropDatabase db38 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.355-0400 m31101| 2015-07-09T14:03:30.354-0400 I COMMAND [repl writer worker 13] dropDatabase db38 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.437-0400 m31100| 2015-07-09T14:03:30.437-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.441-0400 m31102| 2015-07-09T14:03:30.440-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.441-0400 m31101| 2015-07-09T14:03:30.440-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.476-0400 m31200| 2015-07-09T14:03:30.476-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.479-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.479-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.479-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.480-0400 jstests/concurrency/fsm_workloads/create_collection.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.480-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.480-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.480-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.480-0400 m31202| 2015-07-09T14:03:30.479-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:30.480-0400 m31201| 2015-07-09T14:03:30.479-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.486-0400 m30999| 2015-07-09T14:03:30.486-0400 I SHARDING [conn1] distributed lock 'db39/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb772ca4787b9985d1d12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.491-0400 m30999| 2015-07-09T14:03:30.490-0400 I SHARDING [conn1] Placing [db39] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.491-0400 m30999| 2015-07-09T14:03:30.490-0400 I SHARDING [conn1] Enabling sharding for database [db39] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.545-0400 m30999| 2015-07-09T14:03:30.545-0400 I SHARDING [conn1] distributed lock 'db39/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.567-0400 m31100| 2015-07-09T14:03:30.567-0400 I INDEX [conn68] build index on: db39.coll39 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db39.coll39" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.567-0400 m31100| 2015-07-09T14:03:30.567-0400 I INDEX [conn68] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.576-0400 m31100| 2015-07-09T14:03:30.576-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.578-0400 m30999| 2015-07-09T14:03:30.577-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db39.coll39", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.580-0400 m30999| 2015-07-09T14:03:30.580-0400 I SHARDING [conn1] distributed lock 'db39.coll39/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb772ca4787b9985d1d13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.581-0400 m30999| 2015-07-09T14:03:30.581-0400 I SHARDING [conn1] enable sharding on: db39.coll39 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.582-0400 m30999| 2015-07-09T14:03:30.581-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:30.581-0400-559eb772ca4787b9985d1d14", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465010581), what: "shardCollection.start", ns: "db39.coll39", details: { shardKey: { _id: "hashed" }, collection: "db39.coll39", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.586-0400 m31101| 2015-07-09T14:03:30.585-0400 I INDEX [repl writer worker 6] build index on: db39.coll39 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db39.coll39" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.586-0400 m31101| 2015-07-09T14:03:30.585-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.592-0400 m31101| 2015-07-09T14:03:30.591-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.594-0400 m31102| 2015-07-09T14:03:30.594-0400 I INDEX [repl writer worker 2] build index on: db39.coll39 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db39.coll39" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.594-0400 m31102| 2015-07-09T14:03:30.594-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.600-0400 m31102| 2015-07-09T14:03:30.600-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.634-0400 m30999| 2015-07-09T14:03:30.634-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db39.coll39 using new epoch 559eb772ca4787b9985d1d15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.742-0400 m30999| 2015-07-09T14:03:30.741-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db39.coll39: 0ms sequenceNumber: 171 version: 1|1||559eb772ca4787b9985d1d15 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.796-0400 m30999| 2015-07-09T14:03:30.796-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db39.coll39: 0ms sequenceNumber: 172 version: 1|1||559eb772ca4787b9985d1d15 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.798-0400 m31100| 2015-07-09T14:03:30.798-0400 I SHARDING [conn45] remotely refreshing metadata for db39.coll39 with requested shard version 1|1||559eb772ca4787b9985d1d15, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.800-0400 m31100| 2015-07-09T14:03:30.799-0400 I SHARDING [conn45] collection db39.coll39 was previously unsharded, new metadata loaded with shard version 1|1||559eb772ca4787b9985d1d15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.800-0400 m31100| 2015-07-09T14:03:30.799-0400 I SHARDING [conn45] collection version was loaded at version 1|1||559eb772ca4787b9985d1d15, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.800-0400 m30999| 2015-07-09T14:03:30.800-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:30.800-0400-559eb772ca4787b9985d1d16", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465010800), what: "shardCollection", ns: "db39.coll39", details: { version: "1|1||559eb772ca4787b9985d1d15" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.854-0400 m30999| 2015-07-09T14:03:30.854-0400 I SHARDING [conn1] distributed lock 'db39.coll39/bs-osx108-8:30999:1436464534:16807' unlocked. 
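Unlike db38, which was range-sharded on x, db39.coll39 is sharded on a hashed _id, and the shardCollection.start event records numChunks: 2, so the server pre-splits the hashed key space rather than starting from one chunk; that is what gives the balancer an { _id: 0 } -->> { _id: MaxKey } chunk to move to test-rs1 immediately below. A sketch of the equivalent setup from a mongos (numInitialChunks is the shardCollection option behind the logged numChunks):

    // Hashed sharding with two pre-split chunks, as in the log. The
    // server builds the { _id: "hashed" } index and splits the hashed
    // key range at 0, yielding one chunk per half.
    sh.enableSharding("db39");
    db.adminCommand({
        shardCollection: "db39.coll39",
        key: { _id: "hashed" },
        numInitialChunks: 2
    });

The follow-up splits at ±4611686018427387902 seen below then quarter each half of the signed 64-bit hashed key range, which is why those split keys sit near ±2^62.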
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.855-0400 m30999| 2015-07-09T14:03:30.855-0400 I SHARDING [conn1] moving chunk ns: db39.coll39 moving ( ns: db39.coll39, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.856-0400 m31100| 2015-07-09T14:03:30.855-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.856-0400 m31100| 2015-07-09T14:03:30.856-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb772ca4787b9985d1d15') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.861-0400 m31100| 2015-07-09T14:03:30.861-0400 I SHARDING [conn15] distributed lock 'db39.coll39/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb772792e00bb672749b4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.862-0400 m31100| 2015-07-09T14:03:30.861-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:30.861-0400-559eb772792e00bb672749b5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465010861), what: "moveChunk.start", ns: "db39.coll39", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.915-0400 m31100| 2015-07-09T14:03:30.914-0400 I SHARDING [conn15] remotely refreshing metadata for db39.coll39 based on current shard version 1|1||559eb772ca4787b9985d1d15, current metadata version is 1|1||559eb772ca4787b9985d1d15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.916-0400 m31100| 2015-07-09T14:03:30.916-0400 I SHARDING [conn15] metadata of collection db39.coll39 already up to date (shard version : 1|1||559eb772ca4787b9985d1d15, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.917-0400 m31100| 2015-07-09T14:03:30.916-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eb772ca4787b9985d1d15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.917-0400 m31100| 2015-07-09T14:03:30.917-0400 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.917-0400 m31200| 2015-07-09T14:03:30.917-0400 I SHARDING [conn16] remotely refreshing metadata for db39.coll39, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.919-0400 m31200| 2015-07-09T14:03:30.919-0400 I SHARDING [conn16] collection db39.coll39 was previously unsharded, new metadata loaded with shard version 0|0||559eb772ca4787b9985d1d15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.919-0400 m31200| 2015-07-09T14:03:30.919-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb772ca4787b9985d1d15, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.919-0400 m31200| 2015-07-09T14:03:30.919-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db39.coll39 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb772ca4787b9985d1d15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.921-0400 m31100| 2015-07-09T14:03:30.921-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.925-0400 m31100| 2015-07-09T14:03:30.924-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.930-0400 m31100| 2015-07-09T14:03:30.929-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.937-0400 m31200| 2015-07-09T14:03:30.936-0400 I INDEX [migrateThread] build index on: db39.coll39 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db39.coll39" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.937-0400 m31200| 2015-07-09T14:03:30.936-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.940-0400 m31100| 2015-07-09T14:03:30.939-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.944-0400 m31200| 2015-07-09T14:03:30.943-0400 I INDEX [migrateThread] build index on: db39.coll39 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db39.coll39" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.944-0400 m31200| 2015-07-09T14:03:30.943-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.953-0400 m31200| 2015-07-09T14:03:30.953-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.954-0400 m31200| 2015-07-09T14:03:30.953-0400 I SHARDING [migrateThread] Deleter starting delete for: db39.coll39 from { _id: 0 } -> { _id: MaxKey }, with opId: 68331 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.954-0400 m31200| 2015-07-09T14:03:30.954-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db39.coll39 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.957-0400 m31100| 2015-07-09T14:03:30.957-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.960-0400 m31201| 2015-07-09T14:03:30.958-0400 I INDEX [repl writer worker 11] build index on: db39.coll39 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db39.coll39" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.960-0400 m31201| 2015-07-09T14:03:30.958-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.966-0400 m31201| 2015-07-09T14:03:30.965-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.968-0400 m31200| 2015-07-09T14:03:30.967-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.968-0400 m31200| 2015-07-09T14:03:30.967-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db39.coll39' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.969-0400 m31202| 2015-07-09T14:03:30.968-0400 I INDEX [repl writer worker 8] build index on: db39.coll39 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db39.coll39" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.969-0400 m31202| 2015-07-09T14:03:30.968-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.974-0400 m31202| 2015-07-09T14:03:30.973-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.991-0400 m31100| 2015-07-09T14:03:30.991-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.992-0400 m31100| 2015-07-09T14:03:30.991-0400 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.992-0400 m31100| 2015-07-09T14:03:30.992-0400 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:30.992-0400 m31100| 2015-07-09T14:03:30.992-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eb772ca4787b9985d1d15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.003-0400 m31200| 2015-07-09T14:03:31.002-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db39.coll39' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.003-0400 m31200| 2015-07-09T14:03:31.002-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:31.002-0400-559eb773d5a107a5b9c0db34", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465011002), what: "moveChunk.to", ns: "db39.coll39", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 34, step 2 of 5: 12, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 35, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.033-0400 m31100| 2015-07-09T14:03:31.032-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.033-0400 m31100| 2015-07-09T14:03:31.032-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eb772ca4787b9985d1d15 through { _id: MinKey } -> { _id: 0 } for collection 'db39.coll39' [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.045-0400 m31100| 2015-07-09T14:03:31.044-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:31.044-0400-559eb773792e00bb672749b6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465011044), what: "moveChunk.commit", ns: "db39.coll39", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.098-0400 m31100| 2015-07-09T14:03:31.098-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.098-0400 m31100| 2015-07-09T14:03:31.098-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.099-0400 m31100| 2015-07-09T14:03:31.098-0400 I SHARDING [conn15] Deleter starting delete for: db39.coll39 from { _id: 0 } -> { _id: MaxKey }, with opId: 69116 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:31.099-0400 m31100| 2015-07-09T14:03:31.098-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db39.coll39 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.099-0400 m31100| 2015-07-09T14:03:31.098-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.100-0400 m31100| 2015-07-09T14:03:31.099-0400 I SHARDING [conn15] distributed lock 'db39.coll39/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.101-0400 m31100| 2015-07-09T14:03:31.100-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:31.100-0400-559eb773792e00bb672749b7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465011100), what: "moveChunk.from", ns: "db39.coll39", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 60, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 106, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.154-0400 m31100| 2015-07-09T14:03:31.153-0400 I COMMAND [conn15] command db39.coll39 command: moveChunk { moveChunk: "db39.coll39", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb772ca4787b9985d1d15') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 298ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.156-0400 m30999| 2015-07-09T14:03:31.155-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db39.coll39: 0ms sequenceNumber: 173 version: 2|1||559eb772ca4787b9985d1d15 based on: 1|1||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.157-0400 m31100| 2015-07-09T14:03:31.156-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db39.coll39", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb772ca4787b9985d1d15') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.160-0400 m31100| 2015-07-09T14:03:31.160-0400 I SHARDING [conn15] distributed lock 'db39.coll39/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb773792e00bb672749b8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.160-0400 m31100| 2015-07-09T14:03:31.160-0400 I SHARDING [conn15] remotely refreshing metadata for db39.coll39 based on current shard version 2|0||559eb772ca4787b9985d1d15, current metadata version is 2|0||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.162-0400 m31100| 2015-07-09T14:03:31.161-0400 I SHARDING [conn15] updating metadata for db39.coll39 from shard version 2|0||559eb772ca4787b9985d1d15 to shard version 2|1||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.162-0400 m31100| 2015-07-09T14:03:31.162-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eb772ca4787b9985d1d15, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.162-0400 m31100| 2015-07-09T14:03:31.162-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.164-0400 m31100| 2015-07-09T14:03:31.163-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:31.163-0400-559eb773792e00bb672749b9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465011163), what: "split", ns: "db39.coll39", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb772ca4787b9985d1d15') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb772ca4787b9985d1d15') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.216-0400 m31100| 2015-07-09T14:03:31.216-0400 I SHARDING [conn15] distributed lock 'db39.coll39/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.218-0400 m30999| 2015-07-09T14:03:31.218-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db39.coll39: 0ms sequenceNumber: 174 version: 2|3||559eb772ca4787b9985d1d15 based on: 2|1||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.219-0400 m31200| 2015-07-09T14:03:31.218-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db39.coll39", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb772ca4787b9985d1d15') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.224-0400 m31200| 2015-07-09T14:03:31.223-0400 I SHARDING [conn18] distributed lock 'db39.coll39/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb773d5a107a5b9c0db35
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.224-0400 m31200| 2015-07-09T14:03:31.223-0400 I SHARDING [conn18] remotely refreshing metadata for db39.coll39 based on current shard version 0|0||559eb772ca4787b9985d1d15, current metadata version is 1|1||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.225-0400 m31200| 2015-07-09T14:03:31.225-0400 I SHARDING [conn18] updating metadata for db39.coll39 from shard version 0|0||559eb772ca4787b9985d1d15 to shard version 2|0||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.226-0400 m31200| 2015-07-09T14:03:31.225-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eb772ca4787b9985d1d15, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.226-0400 m31200| 2015-07-09T14:03:31.225-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.227-0400 m31200| 2015-07-09T14:03:31.226-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:31.226-0400-559eb773d5a107a5b9c0db36", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465011226), what: "split", ns: "db39.coll39", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb772ca4787b9985d1d15') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb772ca4787b9985d1d15') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.281-0400 m31200| 2015-07-09T14:03:31.280-0400 I SHARDING [conn18] distributed lock 'db39.coll39/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.283-0400 m30999| 2015-07-09T14:03:31.282-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db39.coll39: 0ms sequenceNumber: 175 version: 2|5||559eb772ca4787b9985d1d15 based on: 2|3||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.283-0400 Using 5 threads (requested 5)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.328-0400 m30999| 2015-07-09T14:03:31.328-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63446 #250 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.338-0400 m30999| 2015-07-09T14:03:31.338-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63447 #251 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.338-0400 m30998| 2015-07-09T14:03:31.338-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63448 #249 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.340-0400 m30998| 2015-07-09T14:03:31.339-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63449 #250 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.349-0400 m30998| 2015-07-09T14:03:31.348-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63450 #251 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.355-0400 setting random seed: 5436479817144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.355-0400 setting random seed: 916329296305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.355-0400 setting random seed: 8188440660014
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.355-0400 setting random seed: 8335109679028
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.355-0400 setting random seed: 3122236952185
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.358-0400 m30998| 2015-07-09T14:03:31.357-0400 I SHARDING [conn249] ChunkManager: time to load chunks for db39.coll39: 0ms sequenceNumber: 48 version: 2|5||559eb772ca4787b9985d1d15 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.504-0400 m31100| 2015-07-09T14:03:31.503-0400 I COMMAND [conn49] command db39.create_collection4_1 command: create { create: "create_collection4_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 79350 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.524-0400 m31100| 2015-07-09T14:03:31.523-0400 I COMMAND [conn46] command db39.create_collection0_1 command: create { create: "create_collection0_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 92227 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.539-0400 m31100| 2015-07-09T14:03:31.538-0400 I COMMAND [conn48] command db39.create_collection2_1 command: create { create: "create_collection2_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 93085 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 107ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.560-0400 m31100| 2015-07-09T14:03:31.559-0400 I COMMAND [conn45] command db39.create_collection1_2 command: create { create: "create_collection1_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 94260 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 114ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.584-0400 m31100| 2015-07-09T14:03:31.583-0400 I COMMAND [conn56] command db39.create_collection3_2 command: create { create: "create_collection3_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 84939 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:31.979-0400 m31100| 2015-07-09T14:03:31.978-0400 I COMMAND [conn49] command db39.create_collection4_6 command: create { create: "create_collection4_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 84135 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 104ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.002-0400 m31100| 2015-07-09T14:03:32.001-0400 I COMMAND [conn46] command db39.create_collection0_6 command: create { create: "create_collection0_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 86747 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.026-0400 m31100| 2015-07-09T14:03:32.025-0400 I COMMAND [conn48] command db39.create_collection2_6 command: create { create: "create_collection2_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 92105 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 115ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.044-0400 m31100| 2015-07-09T14:03:32.043-0400 I COMMAND [conn45] command db39.create_collection1_7 command: create { create: "create_collection1_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 101205 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.063-0400 m31100| 2015-07-09T14:03:32.062-0400 I COMMAND [conn56] command db39.create_collection3_7 command: create { create: "create_collection3_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 85154 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 103ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.086-0400 m31100| 2015-07-09T14:03:32.084-0400 I COMMAND [conn49] command db39.create_collection4_7 command: create { create: "create_collection4_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 83161 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 104ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.151-0400 m31100| 2015-07-09T14:03:32.150-0400 I COMMAND [conn45] command db39.create_collection1_8 command: create { create: "create_collection1_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 79337 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.170-0400 m31100| 2015-07-09T14:03:32.169-0400 I COMMAND [conn56] command db39.create_collection3_8 command: create { create: "create_collection3_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 86455 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.292-0400 m31100| 2015-07-09T14:03:32.290-0400 I COMMAND [conn49] command db39.create_collection4_9 command: create { create: "create_collection4_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 78340 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 103ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.310-0400 m31100| 2015-07-09T14:03:32.309-0400 I COMMAND [conn46] command db39.create_collection0_9 command: create { create: "create_collection0_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 85691 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 104ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.328-0400 m31100| 2015-07-09T14:03:32.327-0400 I COMMAND [conn48] command db39.create_collection2_9 command: create { create: "create_collection2_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 83733 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.357-0400 m31100| 2015-07-09T14:03:32.356-0400 I COMMAND [conn45] command db39.create_collection1_10 command: create { create: "create_collection1_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 82049 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.375-0400 m31100| 2015-07-09T14:03:32.374-0400 I COMMAND [conn56] command db39.create_collection3_10 command: create { create: "create_collection3_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 90145 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 108ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.394-0400 m31100| 2015-07-09T14:03:32.393-0400 I COMMAND [conn49] command db39.create_collection4_10 command: create { create: "create_collection4_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 82995 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.431-0400 m31100| 2015-07-09T14:03:32.430-0400 I COMMAND [conn48] command db39.create_collection2_10 command: create { create: "create_collection2_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 80613 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.786-0400 m31100| 2015-07-09T14:03:32.785-0400 I COMMAND [conn46] command db39.create_collection0_14 command: create { create: "create_collection0_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 85630 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.803-0400 m31100| 2015-07-09T14:03:32.802-0400 I COMMAND [conn48] command db39.create_collection2_14 command: create { create: "create_collection2_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 92415 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.829-0400 m31100| 2015-07-09T14:03:32.829-0400 I COMMAND [conn45] command db39.create_collection1_15 command: create { create: "create_collection1_15" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 81719 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 108ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:32.852-0400 m31100| 2015-07-09T14:03:32.847-0400 I COMMAND [conn56] command db39.create_collection3_15 command: create { create: "create_collection3_15" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 89579 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 107ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.071-0400 m31100| 2015-07-09T14:03:32.879-0400 I COMMAND [conn49] command db39.create_collection4_15 command: create { create: "create_collection4_15" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 86869 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 118ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.072-0400 m31100| 2015-07-09T14:03:32.896-0400 I COMMAND [conn46] command db39.create_collection0_15 command: create { create: "create_collection0_15" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 92694 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.073-0400 m31100| 2015-07-09T14:03:32.935-0400 I COMMAND [conn48] command db39.create_collection2_15 command: create { create: "create_collection2_15" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 92305 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 131ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.073-0400 m31100| 2015-07-09T14:03:32.968-0400 I COMMAND [conn45] command db39.create_collection1_16 command: create { create: "create_collection1_16" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 105236 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 138ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.074-0400 m31100| 2015-07-09T14:03:32.989-0400 I COMMAND [conn56] command db39.create_collection3_16 command: create { create: "create_collection3_16" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 120044 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 140ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.074-0400 m31100| 2015-07-09T14:03:33.010-0400 I COMMAND [conn49] command db39.create_collection4_16 command: create { create: "create_collection4_16" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 108312 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 129ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.075-0400 m31100| 2015-07-09T14:03:33.032-0400 I COMMAND [conn46] command db39.create_collection0_16 command: create { create: "create_collection0_16" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 113441 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 135ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.075-0400 m31100| 2015-07-09T14:03:33.060-0400 I COMMAND [conn48] command db39.create_collection2_16 command: create { create: "create_collection2_16" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 95629 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 123ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.078-0400 m31100| 2015-07-09T14:03:33.077-0400 I COMMAND [conn45] command db39.create_collection1_17 command: create { create: "create_collection1_17" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 89400 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.094-0400 m31100| 2015-07-09T14:03:33.093-0400 I COMMAND [conn56] command db39.create_collection3_17 command: create { create: "create_collection3_17" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 86975 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 103ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.127-0400 m31100| 2015-07-09T14:03:33.126-0400 I COMMAND [conn49] command db39.create_collection4_17 command: create { create: "create_collection4_17" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 82030 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 114ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.148-0400 m31100| 2015-07-09T14:03:33.148-0400 I COMMAND [conn46] command db39.create_collection0_17 command: create { create: "create_collection0_17" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 92597 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 113ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.182-0400 m31100| 2015-07-09T14:03:33.181-0400 I COMMAND [conn45] command db39.create_collection1_18 command: create { create: "create_collection1_18" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 83869 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 102ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.187-0400 m30999| 2015-07-09T14:03:33.187-0400 I NETWORK [conn251] end connection 127.0.0.1:63447 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.198-0400 m31100| 2015-07-09T14:03:33.197-0400 I COMMAND [conn56] command db39.create_collection3_18 command: create { create: "create_collection3_18" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 86959 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 102ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.205-0400 m30999| 2015-07-09T14:03:33.205-0400 I NETWORK [conn250] end connection 127.0.0.1:63446 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.216-0400 m30998| 2015-07-09T14:03:33.216-0400 I NETWORK [conn249] end connection 127.0.0.1:63448 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.234-0400 m30998| 2015-07-09T14:03:33.233-0400 I NETWORK [conn250] end connection 127.0.0.1:63449 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.254-0400 m30998| 2015-07-09T14:03:33.253-0400 I NETWORK [conn251] end connection 127.0.0.1:63450 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.276-0400 m30999| 2015-07-09T14:03:33.276-0400 I COMMAND [conn1] DROP: db39.create_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.277-0400 m30999| 2015-07-09T14:03:33.276-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.277-0400 m31100| 2015-07-09T14:03:33.276-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.282-0400 m30999| 2015-07-09T14:03:33.282-0400 I COMMAND [conn1] DROP: db39.create_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.282-0400 m30999| 2015-07-09T14:03:33.282-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.283-0400 m31100| 2015-07-09T14:03:33.282-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.289-0400 m30999| 2015-07-09T14:03:33.288-0400 I COMMAND [conn1] DROP: db39.create_collection0_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.289-0400 m30999| 2015-07-09T14:03:33.288-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.289-0400 m31100| 2015-07-09T14:03:33.289-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.291-0400 m30999| 2015-07-09T14:03:33.291-0400 I COMMAND [conn1] DROP: db39.create_collection0_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.292-0400 m30999| 2015-07-09T14:03:33.291-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.292-0400 m31100| 2015-07-09T14:03:33.291-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.296-0400 m30999| 2015-07-09T14:03:33.295-0400 I COMMAND [conn1] DROP: db39.create_collection0_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.296-0400 m30999| 2015-07-09T14:03:33.295-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.296-0400 m31100| 2015-07-09T14:03:33.295-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.298-0400 m30999| 2015-07-09T14:03:33.298-0400 I COMMAND [conn1] DROP: db39.create_collection0_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.299-0400 m30999| 2015-07-09T14:03:33.298-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.299-0400 m31100| 2015-07-09T14:03:33.299-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.303-0400 m30999| 2015-07-09T14:03:33.302-0400 I COMMAND [conn1] DROP: db39.create_collection0_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.303-0400 m30999| 2015-07-09T14:03:33.302-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.303-0400 m31100| 2015-07-09T14:03:33.302-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.310-0400 m30999| 2015-07-09T14:03:33.310-0400 I COMMAND [conn1] DROP: db39.create_collection0_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.310-0400 m30999| 2015-07-09T14:03:33.310-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.310-0400 m31100| 2015-07-09T14:03:33.310-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.314-0400 m30999| 2015-07-09T14:03:33.314-0400 I COMMAND [conn1] DROP: db39.create_collection0_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.315-0400 m30999| 2015-07-09T14:03:33.314-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.315-0400 m31100| 2015-07-09T14:03:33.315-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.318-0400 m30999| 2015-07-09T14:03:33.318-0400 I COMMAND [conn1] DROP: db39.create_collection0_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.319-0400 m30999| 2015-07-09T14:03:33.318-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.319-0400 m31100| 2015-07-09T14:03:33.318-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.321-0400 m30999| 2015-07-09T14:03:33.321-0400 I COMMAND [conn1] DROP: db39.create_collection0_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.321-0400 m30999| 2015-07-09T14:03:33.321-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.321-0400 m31100| 2015-07-09T14:03:33.321-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.329-0400 m30999| 2015-07-09T14:03:33.329-0400 I COMMAND [conn1] DROP: db39.create_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.329-0400 m30999| 2015-07-09T14:03:33.329-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.330-0400 m31100| 2015-07-09T14:03:33.330-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.332-0400 m30999| 2015-07-09T14:03:33.332-0400 I COMMAND [conn1] DROP: db39.create_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.332-0400 m30999| 2015-07-09T14:03:33.332-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.332-0400 m31100| 2015-07-09T14:03:33.332-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.341-0400 m30999| 2015-07-09T14:03:33.341-0400 I COMMAND [conn1] DROP: db39.create_collection0_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.342-0400 m30999| 2015-07-09T14:03:33.341-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.342-0400 m31100| 2015-07-09T14:03:33.341-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.342-0400 m31102| 2015-07-09T14:03:33.341-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.345-0400 m30999| 2015-07-09T14:03:33.344-0400 I COMMAND [conn1] DROP: db39.create_collection0_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.345-0400 m30999| 2015-07-09T14:03:33.344-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.345-0400 m31100| 2015-07-09T14:03:33.344-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.345-0400 m31102| 2015-07-09T14:03:33.345-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.347-0400 m30999| 2015-07-09T14:03:33.346-0400 I COMMAND [conn1] DROP: db39.create_collection0_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.347-0400 m30999| 2015-07-09T14:03:33.346-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.348-0400 m31100| 2015-07-09T14:03:33.347-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.349-0400 m31102| 2015-07-09T14:03:33.348-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection0_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.350-0400 m30999| 2015-07-09T14:03:33.350-0400 I COMMAND [conn1] DROP: db39.create_collection0_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.351-0400 m30999| 2015-07-09T14:03:33.350-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.351-0400 m31100| 2015-07-09T14:03:33.350-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.351-0400 m31102| 2015-07-09T14:03:33.351-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection0_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.352-0400 m30999| 2015-07-09T14:03:33.352-0400 I COMMAND [conn1] DROP: db39.create_collection0_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.352-0400 m30999| 2015-07-09T14:03:33.352-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.353-0400 m31100| 2015-07-09T14:03:33.352-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.354-0400 m30999| 2015-07-09T14:03:33.354-0400 I COMMAND [conn1] DROP: db39.create_collection0_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.354-0400 m30999| 2015-07-09T14:03:33.354-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.354-0400 m31102| 2015-07-09T14:03:33.354-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection0_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.355-0400 m31100| 2015-07-09T14:03:33.354-0400 I COMMAND [conn56] CMD: drop db39.create_collection0_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.356-0400 m31102| 2015-07-09T14:03:33.356-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection0_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.357-0400 m30999| 2015-07-09T14:03:33.356-0400 I COMMAND [conn1] DROP: db39.create_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.357-0400 m30999| 2015-07-09T14:03:33.356-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.357-0400 m31100| 2015-07-09T14:03:33.357-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.357-0400 m31101| 2015-07-09T14:03:33.357-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection0_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.359-0400 m31102| 2015-07-09T14:03:33.359-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection0_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.359-0400 m31101| 2015-07-09T14:03:33.359-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection0_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.360-0400 m30999| 2015-07-09T14:03:33.360-0400 I COMMAND [conn1] DROP: db39.create_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.361-0400 m30999| 2015-07-09T14:03:33.360-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.361-0400 m31102| 2015-07-09T14:03:33.360-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection0_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.361-0400 m31100| 2015-07-09T14:03:33.360-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.361-0400 m31101| 2015-07-09T14:03:33.361-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection0_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.362-0400 m31102| 2015-07-09T14:03:33.362-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection0_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.362-0400 m30999| 2015-07-09T14:03:33.362-0400 I COMMAND [conn1] DROP: db39.create_collection1_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.362-0400 m30999| 2015-07-09T14:03:33.362-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.362-0400 m31100| 2015-07-09T14:03:33.362-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.363-0400 m31101| 2015-07-09T14:03:33.363-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection0_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.364-0400 m31102| 2015-07-09T14:03:33.364-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection0_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.364-0400 m30999| 2015-07-09T14:03:33.364-0400 I COMMAND [conn1] DROP: db39.create_collection1_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.364-0400 m30999| 2015-07-09T14:03:33.364-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.364-0400 m31101| 2015-07-09T14:03:33.364-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection0_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.364-0400 m31100| 2015-07-09T14:03:33.364-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.365-0400 m30999| 2015-07-09T14:03:33.365-0400 I COMMAND [conn1] DROP: db39.create_collection1_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.365-0400 m30999| 2015-07-09T14:03:33.365-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.366-0400 m31100| 2015-07-09T14:03:33.365-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.366-0400 m31101| 2015-07-09T14:03:33.366-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection0_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.367-0400 m31102| 2015-07-09T14:03:33.366-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection0_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.367-0400 m31101| 2015-07-09T14:03:33.367-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection0_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.368-0400 m31102| 2015-07-09T14:03:33.368-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.369-0400 m30999| 2015-07-09T14:03:33.368-0400 I COMMAND [conn1] DROP: db39.create_collection1_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.369-0400 m30999| 2015-07-09T14:03:33.368-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.369-0400 m31101| 2015-07-09T14:03:33.368-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection0_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.369-0400 m31100| 2015-07-09T14:03:33.368-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.370-0400 m31102| 2015-07-09T14:03:33.369-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.370-0400 m31101| 2015-07-09T14:03:33.370-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection0_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.371-0400 m30999| 2015-07-09T14:03:33.371-0400 I COMMAND [conn1] DROP: db39.create_collection1_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.371-0400 m30999| 2015-07-09T14:03:33.371-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.372-0400 m31101| 2015-07-09T14:03:33.371-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection0_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.372-0400 m31102| 2015-07-09T14:03:33.371-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection0_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.373-0400 m31100| 2015-07-09T14:03:33.372-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.373-0400 m31101| 2015-07-09T14:03:33.372-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection0_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.374-0400 m31102| 2015-07-09T14:03:33.374-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection0_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.374-0400 m30999| 2015-07-09T14:03:33.374-0400 I COMMAND [conn1] DROP: db39.create_collection1_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.374-0400 m30999| 2015-07-09T14:03:33.374-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.374-0400 m31101| 2015-07-09T14:03:33.374-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection0_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.375-0400 m31100| 2015-07-09T14:03:33.374-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.376-0400 m31102| 2015-07-09T14:03:33.376-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection0_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.376-0400 m31101| 2015-07-09T14:03:33.376-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection0_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.377-0400 m30999| 2015-07-09T14:03:33.377-0400 I COMMAND [conn1] DROP: db39.create_collection1_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.377-0400 m30999| 2015-07-09T14:03:33.377-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.377-0400 m31100| 2015-07-09T14:03:33.377-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.377-0400 m31102| 2015-07-09T14:03:33.377-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection0_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.378-0400 m31101| 2015-07-09T14:03:33.377-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection0_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.379-0400 m30999| 2015-07-09T14:03:33.378-0400 I COMMAND [conn1] DROP: db39.create_collection1_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.379-0400 m30999| 2015-07-09T14:03:33.379-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.379-0400 m31100| 2015-07-09T14:03:33.379-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.379-0400 m31102| 2015-07-09T14:03:33.379-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection0_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.380-0400 m31101| 2015-07-09T14:03:33.379-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection0_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.381-0400 m31102| 2015-07-09T14:03:33.380-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection0_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.381-0400 m30999| 2015-07-09T14:03:33.380-0400 I COMMAND [conn1] DROP: db39.create_collection1_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.381-0400 m30999| 2015-07-09T14:03:33.380-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.382-0400 m31100| 2015-07-09T14:03:33.381-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.382-0400 m31101| 2015-07-09T14:03:33.381-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection0_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.382-0400 m30999| 2015-07-09T14:03:33.382-0400 I COMMAND [conn1] DROP: db39.create_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.382-0400 m30999| 2015-07-09T14:03:33.382-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.383-0400 m31101| 2015-07-09T14:03:33.382-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection0_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.383-0400 m31100| 2015-07-09T14:03:33.382-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.383-0400 m31102| 2015-07-09T14:03:33.383-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.384-0400 m31101| 2015-07-09T14:03:33.384-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection0_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.385-0400 m31102| 2015-07-09T14:03:33.385-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.386-0400 m31101| 2015-07-09T14:03:33.385-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection0_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.386-0400 m30999| 2015-07-09T14:03:33.385-0400 I COMMAND [conn1] DROP: db39.create_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.386-0400 m30999| 2015-07-09T14:03:33.386-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.386-0400 m31100| 2015-07-09T14:03:33.386-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.387-0400 m31102| 2015-07-09T14:03:33.387-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection1_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.387-0400 m30999| 2015-07-09T14:03:33.387-0400 I COMMAND [conn1] DROP: db39.create_collection1_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.388-0400 m30999| 2015-07-09T14:03:33.387-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.388-0400 m31101| 2015-07-09T14:03:33.387-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection1_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.388-0400 m31100| 2015-07-09T14:03:33.388-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.389-0400 m31102| 2015-07-09T14:03:33.388-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection1_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.390-0400 m31101| 2015-07-09T14:03:33.389-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection1_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.390-0400 m30999| 2015-07-09T14:03:33.390-0400 I COMMAND [conn1] DROP: db39.create_collection1_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.390-0400 m30999| 2015-07-09T14:03:33.390-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.390-0400 m31102| 2015-07-09T14:03:33.390-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection1_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.390-0400 m31100| 2015-07-09T14:03:33.390-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.391-0400 m31101| 2015-07-09T14:03:33.391-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection1_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.392-0400 m31102| 2015-07-09T14:03:33.391-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection1_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.393-0400 m31101| 2015-07-09T14:03:33.392-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection1_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.393-0400 m31102| 2015-07-09T14:03:33.393-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection1_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.394-0400 m31101| 2015-07-09T14:03:33.394-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection1_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.395-0400 m31102| 2015-07-09T14:03:33.394-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection1_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.395-0400 m31101| 2015-07-09T14:03:33.395-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection1_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.396-0400 m30999| 2015-07-09T14:03:33.395-0400 I COMMAND [conn1] DROP: db39.create_collection1_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.396-0400 m30999| 2015-07-09T14:03:33.396-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.396-0400 m31100| 2015-07-09T14:03:33.396-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.396-0400 m31101| 2015-07-09T14:03:33.396-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection1_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.397-0400 m31102| 2015-07-09T14:03:33.397-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection1_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.398-0400 m31101| 2015-07-09T14:03:33.397-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection1_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.398-0400 m31102| 2015-07-09T14:03:33.398-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection1_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.399-0400 m31101| 2015-07-09T14:03:33.399-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection1_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.400-0400 m30999| 2015-07-09T14:03:33.399-0400 I COMMAND [conn1] DROP: db39.create_collection1_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.400-0400 m30999| 2015-07-09T14:03:33.399-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.400-0400 m31102| 2015-07-09T14:03:33.399-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection1_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.400-0400 m31100| 2015-07-09T14:03:33.400-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.401-0400 m31101| 2015-07-09T14:03:33.400-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection1_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.401-0400 m31102| 2015-07-09T14:03:33.401-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.403-0400 m31101| 2015-07-09T14:03:33.402-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection1_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.403-0400 m31102| 2015-07-09T14:03:33.403-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.403-0400 m30999| 2015-07-09T14:03:33.403-0400 I COMMAND [conn1] DROP: db39.create_collection1_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.404-0400 m30999| 2015-07-09T14:03:33.403-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.404-0400 m31100| 2015-07-09T14:03:33.403-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.404-0400 m31101| 2015-07-09T14:03:33.404-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection1_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.405-0400 m31102| 2015-07-09T14:03:33.404-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection1_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.405-0400 m31101| 2015-07-09T14:03:33.405-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection1_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.406-0400 m30999| 2015-07-09T14:03:33.405-0400 I COMMAND [conn1] DROP: db39.create_collection1_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.406-0400 m30999| 2015-07-09T14:03:33.405-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.406-0400 m31100| 2015-07-09T14:03:33.406-0400 I COMMAND [conn56] CMD: drop db39.create_collection1_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.406-0400 m31102| 2015-07-09T14:03:33.406-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection1_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.407-0400 m31101| 2015-07-09T14:03:33.406-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection1_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.407-0400 m30999| 2015-07-09T14:03:33.407-0400 I COMMAND [conn1] DROP: db39.create_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.407-0400 m30999| 2015-07-09T14:03:33.407-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.408-0400 m31102| 2015-07-09T14:03:33.407-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection1_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.408-0400 m31100| 2015-07-09T14:03:33.408-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.409-0400 m31101| 2015-07-09T14:03:33.408-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection1_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.409-0400 m30999| 2015-07-09T14:03:33.409-0400 I COMMAND [conn1] DROP: db39.create_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.409-0400 m30999| 2015-07-09T14:03:33.409-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.409-0400 m31102| 2015-07-09T14:03:33.409-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection1_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.410-0400 m31100| 2015-07-09T14:03:33.409-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.410-0400 m31101| 2015-07-09T14:03:33.409-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection1_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.411-0400 m31102| 2015-07-09T14:03:33.410-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection1_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.411-0400 m31101| 2015-07-09T14:03:33.411-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection1_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.412-0400 m31102| 2015-07-09T14:03:33.411-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection1_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.418-0400 m31101| 2015-07-09T14:03:33.412-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection1_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.418-0400 m30999| 2015-07-09T14:03:33.416-0400 I COMMAND [conn1] DROP: db39.create_collection2_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.418-0400 m31100| 2015-07-09T14:03:33.416-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.419-0400 m31102| 2015-07-09T14:03:33.413-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.419-0400 m31101| 2015-07-09T14:03:33.414-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection1_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.419-0400 m30999| 2015-07-09T14:03:33.416-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.419-0400 m31102| 2015-07-09T14:03:33.414-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.419-0400 m31101| 2015-07-09T14:03:33.415-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection2_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.419-0400 m31101| 2015-07-09T14:03:33.416-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection2_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.420-0400 m30999| 2015-07-09T14:03:33.418-0400 I COMMAND [conn1] DROP: db39.create_collection2_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.420-0400 m30999| 2015-07-09T14:03:33.419-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.420-0400 m31100| 2015-07-09T14:03:33.419-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.420-0400 m31102| 2015-07-09T14:03:33.419-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection2_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.421-0400 m31101| 2015-07-09T14:03:33.420-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection2_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.421-0400 m30999| 2015-07-09T14:03:33.420-0400 I COMMAND [conn1] DROP: db39.create_collection2_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.421-0400 m30999| 2015-07-09T14:03:33.420-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.421-0400 m31100| 2015-07-09T14:03:33.420-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.422-0400 m30999| 2015-07-09T14:03:33.421-0400 I COMMAND [conn1] DROP: db39.create_collection2_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.422-0400 m30999| 2015-07-09T14:03:33.421-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.422-0400 m31100| 2015-07-09T14:03:33.422-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.422-0400 m31102| 2015-07-09T14:03:33.422-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection2_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.422-0400 m31101| 2015-07-09T14:03:33.422-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection2_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.423-0400 m31102| 2015-07-09T14:03:33.423-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection2_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.424-0400 m31101| 2015-07-09T14:03:33.424-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection2_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.425-0400 m31102| 2015-07-09T14:03:33.424-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection2_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.425-0400 m30999| 2015-07-09T14:03:33.424-0400 I COMMAND [conn1] DROP: db39.create_collection2_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.425-0400 m30999| 2015-07-09T14:03:33.424-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.425-0400 m31100| 2015-07-09T14:03:33.425-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.426-0400 m31101| 2015-07-09T14:03:33.426-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection2_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.429-0400 m30999| 2015-07-09T14:03:33.429-0400 I COMMAND [conn1] DROP: db39.create_collection2_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.429-0400 m30999| 2015-07-09T14:03:33.429-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.430-0400 m31100| 2015-07-09T14:03:33.429-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.430-0400 m31102| 2015-07-09T14:03:33.430-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection2_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.431-0400 m31101| 2015-07-09T14:03:33.430-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection2_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.431-0400 m30999| 2015-07-09T14:03:33.431-0400 I COMMAND [conn1] DROP: db39.create_collection2_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.431-0400 m30999| 2015-07-09T14:03:33.431-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.432-0400 m31100| 2015-07-09T14:03:33.431-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.433-0400 m31101| 2015-07-09T14:03:33.433-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection2_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.433-0400 m31102| 2015-07-09T14:03:33.433-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection2_15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.434-0400 m30999| 2015-07-09T14:03:33.433-0400 I COMMAND [conn1] DROP: db39.create_collection2_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.434-0400 m30999| 2015-07-09T14:03:33.433-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.434-0400 m31100| 2015-07-09T14:03:33.434-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.436-0400 m30999| 2015-07-09T14:03:33.435-0400 I COMMAND [conn1] DROP: db39.create_collection2_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.436-0400 m30999| 2015-07-09T14:03:33.435-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.436-0400 m31100| 2015-07-09T14:03:33.436-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.436-0400 m31101| 2015-07-09T14:03:33.436-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection2_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.436-0400 m31102| 2015-07-09T14:03:33.436-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection2_16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.437-0400 m31101| 2015-07-09T14:03:33.437-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection2_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.437-0400 m31102| 2015-07-09T14:03:33.437-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection2_17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.439-0400 m30999| 2015-07-09T14:03:33.438-0400 I COMMAND [conn1] DROP: db39.create_collection2_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.439-0400 m30999| 2015-07-09T14:03:33.438-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.439-0400 m31100| 2015-07-09T14:03:33.438-0400
I COMMAND [conn56] CMD: drop db39.create_collection2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.439-0400 m31101| 2015-07-09T14:03:33.439-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection2_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.439-0400 m31102| 2015-07-09T14:03:33.439-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection2_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.441-0400 m30999| 2015-07-09T14:03:33.441-0400 I COMMAND [conn1] DROP: db39.create_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.442-0400 m30999| 2015-07-09T14:03:33.441-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.442-0400 m31100| 2015-07-09T14:03:33.441-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.442-0400 m31101| 2015-07-09T14:03:33.442-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.442-0400 m31102| 2015-07-09T14:03:33.442-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.443-0400 m30999| 2015-07-09T14:03:33.443-0400 I COMMAND [conn1] DROP: db39.create_collection2_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.444-0400 m30999| 2015-07-09T14:03:33.443-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.444-0400 m31100| 2015-07-09T14:03:33.443-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.445-0400 m31102| 2015-07-09T14:03:33.444-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.445-0400 m31101| 2015-07-09T14:03:33.444-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.450-0400 m30999| 2015-07-09T14:03:33.449-0400 I COMMAND [conn1] DROP: db39.create_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.450-0400 m30999| 2015-07-09T14:03:33.449-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.450-0400 m31100| 2015-07-09T14:03:33.450-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.451-0400 m31102| 2015-07-09T14:03:33.451-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection2_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.451-0400 m31101| 2015-07-09T14:03:33.451-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection2_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.452-0400 m30999| 2015-07-09T14:03:33.451-0400 I COMMAND [conn1] DROP: db39.create_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.452-0400 m30999| 2015-07-09T14:03:33.451-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.452-0400 m31100| 2015-07-09T14:03:33.452-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.454-0400 m30999| 2015-07-09T14:03:33.453-0400 I COMMAND [conn1] DROP: db39.create_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.454-0400 m30999| 
2015-07-09T14:03:33.453-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.454-0400 m31100| 2015-07-09T14:03:33.453-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.455-0400 m31102| 2015-07-09T14:03:33.453-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.455-0400 m31101| 2015-07-09T14:03:33.454-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection2_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.455-0400 m31102| 2015-07-09T14:03:33.455-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.456-0400 m31101| 2015-07-09T14:03:33.455-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection2_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.456-0400 m31102| 2015-07-09T14:03:33.456-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.456-0400 m30999| 2015-07-09T14:03:33.456-0400 I COMMAND [conn1] DROP: db39.create_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.456-0400 m30999| 2015-07-09T14:03:33.456-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.456-0400 m31100| 2015-07-09T14:03:33.456-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.457-0400 m31101| 2015-07-09T14:03:33.457-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection2_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.461-0400 m30999| 2015-07-09T14:03:33.461-0400 I COMMAND [conn1] DROP: db39.create_collection2_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.461-0400 m30999| 2015-07-09T14:03:33.461-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.461-0400 m31100| 2015-07-09T14:03:33.461-0400 I COMMAND [conn56] CMD: drop db39.create_collection2_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.462-0400 m31102| 2015-07-09T14:03:33.462-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.462-0400 m31101| 2015-07-09T14:03:33.462-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection2_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.463-0400 m30999| 2015-07-09T14:03:33.463-0400 I COMMAND [conn1] DROP: db39.create_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.463-0400 m30999| 2015-07-09T14:03:33.463-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.463-0400 m31100| 2015-07-09T14:03:33.463-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.465-0400 m31101| 2015-07-09T14:03:33.465-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection2_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.466-0400 m31102| 2015-07-09T14:03:33.465-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection2_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.466-0400 m30999| 2015-07-09T14:03:33.465-0400 I COMMAND [conn1] DROP: db39.create_collection3_1 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:33.466-0400 m30999| 2015-07-09T14:03:33.465-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.466-0400 m31100| 2015-07-09T14:03:33.466-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.466-0400 m31101| 2015-07-09T14:03:33.466-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.467-0400 m31102| 2015-07-09T14:03:33.467-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.468-0400 m31102| 2015-07-09T14:03:33.468-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.470-0400 m30999| 2015-07-09T14:03:33.469-0400 I COMMAND [conn1] DROP: db39.create_collection3_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.470-0400 m30999| 2015-07-09T14:03:33.470-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.470-0400 m31100| 2015-07-09T14:03:33.470-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.470-0400 m31101| 2015-07-09T14:03:33.470-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.474-0400 m30999| 2015-07-09T14:03:33.474-0400 I COMMAND [conn1] DROP: db39.create_collection3_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.475-0400 m30999| 2015-07-09T14:03:33.474-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.475-0400 m31100| 2015-07-09T14:03:33.475-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.476-0400 m31102| 2015-07-09T14:03:33.475-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection3_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.476-0400 m31101| 2015-07-09T14:03:33.476-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection3_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.477-0400 m30999| 2015-07-09T14:03:33.476-0400 I COMMAND [conn1] DROP: db39.create_collection3_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.477-0400 m30999| 2015-07-09T14:03:33.476-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.477-0400 m31100| 2015-07-09T14:03:33.477-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.478-0400 m30999| 2015-07-09T14:03:33.478-0400 I COMMAND [conn1] DROP: db39.create_collection3_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.479-0400 m31102| 2015-07-09T14:03:33.478-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection3_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.479-0400 m30999| 2015-07-09T14:03:33.478-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.479-0400 m31100| 2015-07-09T14:03:33.479-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.479-0400 m31101| 2015-07-09T14:03:33.479-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection3_11 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.480-0400 m31102| 2015-07-09T14:03:33.480-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection3_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.481-0400 m30999| 2015-07-09T14:03:33.481-0400 I COMMAND [conn1] DROP: db39.create_collection3_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.481-0400 m30999| 2015-07-09T14:03:33.481-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.481-0400 m31101| 2015-07-09T14:03:33.481-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection3_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.481-0400 m31102| 2015-07-09T14:03:33.481-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection3_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.482-0400 m31100| 2015-07-09T14:03:33.481-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.483-0400 m31101| 2015-07-09T14:03:33.482-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection3_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.483-0400 m30999| 2015-07-09T14:03:33.483-0400 I COMMAND [conn1] DROP: db39.create_collection3_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.483-0400 m30999| 2015-07-09T14:03:33.483-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.483-0400 m31100| 2015-07-09T14:03:33.483-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.484-0400 m31102| 2015-07-09T14:03:33.484-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection3_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.484-0400 m30999| 2015-07-09T14:03:33.484-0400 I COMMAND [conn1] DROP: db39.create_collection3_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.484-0400 m30999| 2015-07-09T14:03:33.484-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.484-0400 m31100| 2015-07-09T14:03:33.484-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.485-0400 m31101| 2015-07-09T14:03:33.485-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection3_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.486-0400 m31102| 2015-07-09T14:03:33.486-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection3_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.487-0400 m31101| 2015-07-09T14:03:33.486-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection3_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.487-0400 m30999| 2015-07-09T14:03:33.486-0400 I COMMAND [conn1] DROP: db39.create_collection3_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.487-0400 m30999| 2015-07-09T14:03:33.486-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.487-0400 m31100| 2015-07-09T14:03:33.486-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.488-0400 m30999| 2015-07-09T14:03:33.488-0400 I COMMAND [conn1] DROP: db39.create_collection3_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.488-0400 m30999| 2015-07-09T14:03:33.488-0400 I COMMAND [conn1] drop going to do 
passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.488-0400 m31101| 2015-07-09T14:03:33.488-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection3_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.488-0400 m31100| 2015-07-09T14:03:33.488-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.489-0400 m31102| 2015-07-09T14:03:33.489-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection3_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.489-0400 m30999| 2015-07-09T14:03:33.489-0400 I COMMAND [conn1] DROP: db39.create_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.490-0400 m30999| 2015-07-09T14:03:33.489-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.490-0400 m31100| 2015-07-09T14:03:33.489-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.490-0400 m31102| 2015-07-09T14:03:33.490-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection3_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.490-0400 m31101| 2015-07-09T14:03:33.490-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection3_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.491-0400 m30999| 2015-07-09T14:03:33.491-0400 I COMMAND [conn1] DROP: db39.create_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.491-0400 m30999| 2015-07-09T14:03:33.491-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.491-0400 m31100| 2015-07-09T14:03:33.491-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.492-0400 m31101| 2015-07-09T14:03:33.492-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection3_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.492-0400 m31102| 2015-07-09T14:03:33.492-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection3_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.493-0400 m31101| 2015-07-09T14:03:33.493-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.493-0400 m31102| 2015-07-09T14:03:33.493-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.494-0400 m30999| 2015-07-09T14:03:33.494-0400 I COMMAND [conn1] DROP: db39.create_collection3_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.494-0400 m30999| 2015-07-09T14:03:33.494-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.494-0400 m31100| 2015-07-09T14:03:33.494-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.495-0400 m31102| 2015-07-09T14:03:33.494-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.496-0400 m30999| 2015-07-09T14:03:33.496-0400 I COMMAND [conn1] DROP: db39.create_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.496-0400 m30999| 2015-07-09T14:03:33.496-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.497-0400 m31100| 2015-07-09T14:03:33.496-0400 I COMMAND 
[conn56] CMD: drop db39.create_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.497-0400 m31101| 2015-07-09T14:03:33.496-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.497-0400 m31102| 2015-07-09T14:03:33.496-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection3_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.498-0400 m30999| 2015-07-09T14:03:33.497-0400 I COMMAND [conn1] DROP: db39.create_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.498-0400 m30999| 2015-07-09T14:03:33.497-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.498-0400 m31100| 2015-07-09T14:03:33.497-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.498-0400 m31101| 2015-07-09T14:03:33.498-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection3_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.499-0400 m31101| 2015-07-09T14:03:33.499-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.500-0400 m31102| 2015-07-09T14:03:33.499-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection3_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.500-0400 m30999| 2015-07-09T14:03:33.499-0400 I COMMAND [conn1] DROP: db39.create_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.500-0400 m30999| 2015-07-09T14:03:33.499-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.500-0400 m31100| 2015-07-09T14:03:33.500-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.500-0400 m31102| 2015-07-09T14:03:33.500-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.501-0400 m30999| 2015-07-09T14:03:33.501-0400 I COMMAND [conn1] DROP: db39.create_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.501-0400 m30999| 2015-07-09T14:03:33.501-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.501-0400 m31100| 2015-07-09T14:03:33.501-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.502-0400 m31101| 2015-07-09T14:03:33.502-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection3_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.502-0400 m31102| 2015-07-09T14:03:33.502-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.503-0400 m31101| 2015-07-09T14:03:33.503-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection3_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.504-0400 m31100| 2015-07-09T14:03:33.504-0400 I COMMAND [conn56] CMD: drop db39.create_collection3_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.504-0400 m30999| 2015-07-09T14:03:33.504-0400 I COMMAND [conn1] DROP: db39.create_collection3_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.505-0400 m30999| 2015-07-09T14:03:33.504-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.505-0400 m31102| 
2015-07-09T14:03:33.504-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.505-0400 m30999| 2015-07-09T14:03:33.505-0400 I COMMAND [conn1] DROP: db39.create_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.506-0400 m30999| 2015-07-09T14:03:33.505-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.506-0400 m31100| 2015-07-09T14:03:33.505-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.506-0400 m31101| 2015-07-09T14:03:33.506-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection3_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.507-0400 m31102| 2015-07-09T14:03:33.507-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection3_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.507-0400 m31101| 2015-07-09T14:03:33.507-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection3_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.508-0400 m30999| 2015-07-09T14:03:33.507-0400 I COMMAND [conn1] DROP: db39.create_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.508-0400 m30999| 2015-07-09T14:03:33.507-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.508-0400 m31100| 2015-07-09T14:03:33.508-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.508-0400 m31101| 2015-07-09T14:03:33.508-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.509-0400 m30999| 2015-07-09T14:03:33.509-0400 I COMMAND [conn1] DROP: db39.create_collection4_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.510-0400 m30999| 2015-07-09T14:03:33.509-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.510-0400 m31102| 2015-07-09T14:03:33.509-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.510-0400 m31100| 2015-07-09T14:03:33.509-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.511-0400 m31102| 2015-07-09T14:03:33.510-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.511-0400 m30999| 2015-07-09T14:03:33.511-0400 I COMMAND [conn1] DROP: db39.create_collection4_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.511-0400 m30999| 2015-07-09T14:03:33.511-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.511-0400 m31100| 2015-07-09T14:03:33.511-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.511-0400 m31101| 2015-07-09T14:03:33.511-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.512-0400 m31102| 2015-07-09T14:03:33.512-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection4_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.513-0400 m30999| 2015-07-09T14:03:33.512-0400 I COMMAND [conn1] DROP: db39.create_collection4_12 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:33.513-0400 m30999| 2015-07-09T14:03:33.512-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.513-0400 m31100| 2015-07-09T14:03:33.512-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.513-0400 m31101| 2015-07-09T14:03:33.513-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection4_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.514-0400 m31101| 2015-07-09T14:03:33.514-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection4_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.515-0400 m30999| 2015-07-09T14:03:33.514-0400 I COMMAND [conn1] DROP: db39.create_collection4_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.515-0400 m30999| 2015-07-09T14:03:33.514-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.515-0400 m31100| 2015-07-09T14:03:33.515-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.515-0400 m31102| 2015-07-09T14:03:33.515-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection4_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.516-0400 m30999| 2015-07-09T14:03:33.516-0400 I COMMAND [conn1] DROP: db39.create_collection4_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.516-0400 m30999| 2015-07-09T14:03:33.516-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.516-0400 m31100| 2015-07-09T14:03:33.516-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.517-0400 m31102| 2015-07-09T14:03:33.517-0400 I COMMAND [repl writer worker 2] CMD: drop db39.create_collection4_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.517-0400 m31101| 2015-07-09T14:03:33.517-0400 I COMMAND [repl writer worker 6] CMD: drop db39.create_collection4_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.518-0400 m30999| 2015-07-09T14:03:33.518-0400 I COMMAND [conn1] DROP: db39.create_collection4_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.518-0400 m30999| 2015-07-09T14:03:33.518-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.519-0400 m31100| 2015-07-09T14:03:33.518-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.519-0400 m31102| 2015-07-09T14:03:33.518-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection4_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.519-0400 m31101| 2015-07-09T14:03:33.518-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection4_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.520-0400 m31101| 2015-07-09T14:03:33.519-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection4_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.520-0400 m31102| 2015-07-09T14:03:33.520-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection4_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.521-0400 m30999| 2015-07-09T14:03:33.521-0400 I COMMAND [conn1] DROP: db39.create_collection4_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.521-0400 m30999| 2015-07-09T14:03:33.521-0400 I COMMAND [conn1] drop going to do passthrough 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.521-0400 m31100| 2015-07-09T14:03:33.521-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.523-0400 m30999| 2015-07-09T14:03:33.523-0400 I COMMAND [conn1] DROP: db39.create_collection4_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.524-0400 m30999| 2015-07-09T14:03:33.523-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.524-0400 m31101| 2015-07-09T14:03:33.523-0400 I COMMAND [repl writer worker 10] CMD: drop db39.create_collection4_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.524-0400 m31102| 2015-07-09T14:03:33.523-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection4_15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.524-0400 m31100| 2015-07-09T14:03:33.523-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.524-0400 m30999| 2015-07-09T14:03:33.524-0400 I COMMAND [conn1] DROP: db39.create_collection4_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.525-0400 m31100| 2015-07-09T14:03:33.524-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.525-0400 m30999| 2015-07-09T14:03:33.524-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.525-0400 m31102| 2015-07-09T14:03:33.525-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection4_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.526-0400 m31101| 2015-07-09T14:03:33.526-0400 I COMMAND [repl writer worker 5] CMD: drop db39.create_collection4_16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.526-0400 m30999| 2015-07-09T14:03:33.526-0400 I COMMAND [conn1] DROP: db39.create_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.527-0400 m30999| 2015-07-09T14:03:33.526-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.527-0400 m31102| 2015-07-09T14:03:33.526-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection4_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.527-0400 m31100| 2015-07-09T14:03:33.526-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.527-0400 m31101| 2015-07-09T14:03:33.527-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection4_17 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.528-0400 m31102| 2015-07-09T14:03:33.527-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection4_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.528-0400 m31101| 2015-07-09T14:03:33.528-0400 I COMMAND [repl writer worker 8] CMD: drop db39.create_collection4_18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.529-0400 m30999| 2015-07-09T14:03:33.528-0400 I COMMAND [conn1] DROP: db39.create_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.529-0400 m30999| 2015-07-09T14:03:33.528-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.529-0400 m31100| 2015-07-09T14:03:33.529-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.530-0400 m31102| 2015-07-09T14:03:33.530-0400 I COMMAND [repl writer worker 6] 
CMD: drop db39.create_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.530-0400 m30999| 2015-07-09T14:03:33.530-0400 I COMMAND [conn1] DROP: db39.create_collection4_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.530-0400 m30999| 2015-07-09T14:03:33.530-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.531-0400 m31101| 2015-07-09T14:03:33.530-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.531-0400 m31100| 2015-07-09T14:03:33.530-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.531-0400 m30999| 2015-07-09T14:03:33.531-0400 I COMMAND [conn1] DROP: db39.create_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.531-0400 m31101| 2015-07-09T14:03:33.531-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.532-0400 m30999| 2015-07-09T14:03:33.531-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.532-0400 m31100| 2015-07-09T14:03:33.531-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.532-0400 m31102| 2015-07-09T14:03:33.532-0400 I COMMAND [repl writer worker 1] CMD: drop db39.create_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.533-0400 m31101| 2015-07-09T14:03:33.533-0400 I COMMAND [repl writer worker 15] CMD: drop db39.create_collection4_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.533-0400 m31102| 2015-07-09T14:03:33.533-0400 I COMMAND [repl writer worker 7] CMD: drop db39.create_collection4_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.533-0400 m30999| 2015-07-09T14:03:33.533-0400 I COMMAND [conn1] DROP: db39.create_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.534-0400 m30999| 2015-07-09T14:03:33.533-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.534-0400 m31100| 2015-07-09T14:03:33.534-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.534-0400 m31101| 2015-07-09T14:03:33.534-0400 I COMMAND [repl writer worker 11] CMD: drop db39.create_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.535-0400 m31102| 2015-07-09T14:03:33.535-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection4_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.535-0400 m30999| 2015-07-09T14:03:33.535-0400 I COMMAND [conn1] DROP: db39.create_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.536-0400 m30999| 2015-07-09T14:03:33.535-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.536-0400 m31100| 2015-07-09T14:03:33.535-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.537-0400 m31101| 2015-07-09T14:03:33.536-0400 I COMMAND [repl writer worker 13] CMD: drop db39.create_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.537-0400 m30999| 2015-07-09T14:03:33.536-0400 I COMMAND [conn1] DROP: db39.create_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.537-0400 m30999| 2015-07-09T14:03:33.536-0400 I COMMAND 
[conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.537-0400 m31100| 2015-07-09T14:03:33.537-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.537-0400 m31102| 2015-07-09T14:03:33.537-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection4_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.538-0400 m31101| 2015-07-09T14:03:33.537-0400 I COMMAND [repl writer worker 4] CMD: drop db39.create_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.538-0400 m30999| 2015-07-09T14:03:33.538-0400 I COMMAND [conn1] DROP: db39.create_collection4_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.539-0400 m30999| 2015-07-09T14:03:33.538-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.539-0400 m31102| 2015-07-09T14:03:33.538-0400 I COMMAND [repl writer worker 9] CMD: drop db39.create_collection4_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.539-0400 m31100| 2015-07-09T14:03:33.538-0400 I COMMAND [conn56] CMD: drop db39.create_collection4_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.539-0400 m31101| 2015-07-09T14:03:33.539-0400 I COMMAND [repl writer worker 14] CMD: drop db39.create_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 jstests/concurrency/fsm_workloads/create_collection.js: Workload completed in 1990 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.540-0400 m30999| 2015-07-09T14:03:33.540-0400 I COMMAND [conn1] DROP: db39.coll39 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.541-0400 m30999| 2015-07-09T14:03:33.540-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:33.540-0400-559eb775ca4787b9985d1d17", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465013540), what: "dropCollection.start", ns: "db39.coll39", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.541-0400 m31102| 2015-07-09T14:03:33.540-0400 I COMMAND [repl writer worker 0] CMD: drop db39.create_collection4_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.541-0400 m31102| 2015-07-09T14:03:33.541-0400 I COMMAND [repl writer worker 12] CMD: drop db39.create_collection4_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.542-0400 m31101| 2015-07-09T14:03:33.542-0400 I COMMAND [repl writer worker 3] CMD: drop db39.create_collection4_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.596-0400 m30999| 2015-07-09T14:03:33.596-0400 I SHARDING [conn1] distributed lock 'db39.coll39/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb775ca4787b9985d1d18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.597-0400 m31100| 2015-07-09T14:03:33.597-0400 I COMMAND [conn15] CMD: drop db39.coll39 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.599-0400 m31200| 2015-07-09T14:03:33.599-0400 I COMMAND [conn18] CMD: drop db39.coll39 [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.601-0400 m31102| 
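(Annotation, not part of the captured log.) The create_collection workload above drives every drop through the router: m30999 logs "DROP: <ns>" and "drop going to do passthrough", the primary shard member m31100 executes the drop, and the secondaries m31101/m31102 replay it via their repl writer workers. A minimal shell sketch of one such drop, assuming a mongos at localhost:30999 (hypothetical address, not taken from this log):

var mongos = new Mongo("localhost:30999");  // connect to the router
// mongos logs "DROP: db39.create_collection2_0" and "drop going to do passthrough";
// the primary shard then logs "CMD: drop db39.create_collection2_0".
mongos.getDB("db39").getCollection("create_collection2_0").drop();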
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.601-0400 m31101| 2015-07-09T14:03:33.600-0400 I COMMAND [repl writer worker 2] CMD: drop db39.coll39
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.603-0400 m31201| 2015-07-09T14:03:33.603-0400 I COMMAND [repl writer worker 10] CMD: drop db39.coll39
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.604-0400 m31202| 2015-07-09T14:03:33.603-0400 I COMMAND [repl writer worker 2] CMD: drop db39.coll39
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.656-0400 m31100| 2015-07-09T14:03:33.656-0400 I SHARDING [conn15] remotely refreshing metadata for db39.coll39 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb772ca4787b9985d1d15, current metadata version is 2|3||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.658-0400 m31100| 2015-07-09T14:03:33.657-0400 W SHARDING [conn15] no chunks found when reloading db39.coll39, previous version was 0|0||559eb772ca4787b9985d1d15, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.658-0400 m31100| 2015-07-09T14:03:33.657-0400 I SHARDING [conn15] dropping metadata for db39.coll39 at shard version 2|3||559eb772ca4787b9985d1d15, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.659-0400 m31200| 2015-07-09T14:03:33.658-0400 I SHARDING [conn18] remotely refreshing metadata for db39.coll39 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb772ca4787b9985d1d15, current metadata version is 2|5||559eb772ca4787b9985d1d15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.660-0400 m31200| 2015-07-09T14:03:33.660-0400 W SHARDING [conn18] no chunks found when reloading db39.coll39, previous version was 0|0||559eb772ca4787b9985d1d15, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.660-0400 m31200| 2015-07-09T14:03:33.660-0400 I SHARDING [conn18] dropping metadata for db39.coll39 at shard version 2|5||559eb772ca4787b9985d1d15, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.661-0400 m30999| 2015-07-09T14:03:33.661-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:33.661-0400-559eb775ca4787b9985d1d19", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465013661), what: "dropCollection", ns: "db39.coll39", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.715-0400 m30999| 2015-07-09T14:03:33.715-0400 I SHARDING [conn1] distributed lock 'db39.coll39/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.771-0400 m30999| 2015-07-09T14:03:33.771-0400 I COMMAND [conn1] DROP DATABASE: db39
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.772-0400 m30999| 2015-07-09T14:03:33.771-0400 I SHARDING [conn1] DBConfig::dropDatabase: db39
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.772-0400 m30999| 2015-07-09T14:03:33.771-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:33.771-0400-559eb775ca4787b9985d1d1a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465013771), what: "dropDatabase.start", ns: "db39", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.877-0400 m30999| 2015-07-09T14:03:33.877-0400 I SHARDING [conn1] DBConfig::dropDatabase: db39 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.877-0400 m31100| 2015-07-09T14:03:33.877-0400 I COMMAND [conn28] dropDatabase db39 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.878-0400 m31100| 2015-07-09T14:03:33.877-0400 I COMMAND [conn28] dropDatabase db39 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.878-0400 m30999| 2015-07-09T14:03:33.878-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:33.878-0400-559eb775ca4787b9985d1d1b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465013878), what: "dropDatabase", ns: "db39", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.878-0400 m31101| 2015-07-09T14:03:33.878-0400 I COMMAND [repl writer worker 6] dropDatabase db39 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.878-0400 m31101| 2015-07-09T14:03:33.878-0400 I COMMAND [repl writer worker 6] dropDatabase db39 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.879-0400 m31102| 2015-07-09T14:03:33.878-0400 I COMMAND [repl writer worker 2] dropDatabase db39 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.879-0400 m31102| 2015-07-09T14:03:33.878-0400 I COMMAND [repl writer worker 2] dropDatabase db39 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.968-0400 m31100| 2015-07-09T14:03:33.968-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.972-0400 m31102| 2015-07-09T14:03:33.972-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:33.972-0400 m31101| 2015-07-09T14:03:33.972-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.007-0400 m31200| 2015-07-09T14:03:34.006-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400 jstests/concurrency/fsm_workloads/indexed_insert_where.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.010-0400 m31201| 2015-07-09T14:03:34.010-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.011-0400 m31202| 2015-07-09T14:03:34.010-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.018-0400 m30999| 2015-07-09T14:03:34.018-0400 I SHARDING [conn1] distributed lock 'db40/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb776ca4787b9985d1d1c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.022-0400 m30999| 2015-07-09T14:03:34.021-0400 I SHARDING [conn1] Placing [db40] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.022-0400 m30999| 2015-07-09T14:03:34.021-0400 I SHARDING [conn1] Enabling sharding for database [db40] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.076-0400 m30999| 2015-07-09T14:03:34.076-0400 I SHARDING [conn1] distributed lock 'db40/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.099-0400 m31100| 2015-07-09T14:03:34.098-0400 I INDEX [conn68] build index on: db40.coll40 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db40.coll40" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.100-0400 m31100| 2015-07-09T14:03:34.098-0400 I INDEX [conn68] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.115-0400 m31100| 2015-07-09T14:03:34.115-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.117-0400 m30999| 2015-07-09T14:03:34.117-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db40.coll40", key: { tid: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.121-0400 m30999| 2015-07-09T14:03:34.121-0400 I SHARDING [conn1] distributed lock 'db40.coll40/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb776ca4787b9985d1d1d
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.123-0400 m30999| 2015-07-09T14:03:34.122-0400 I SHARDING [conn1] enable sharding on: db40.coll40 with shard key: { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.124-0400 m30999| 2015-07-09T14:03:34.123-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:34.123-0400-559eb776ca4787b9985d1d1e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465014123), what: "shardCollection.start", ns: "db40.coll40", details: { shardKey: { tid: 1.0 }, collection: "db40.coll40", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.137-0400 m31101| 2015-07-09T14:03:34.136-0400 I INDEX [repl writer worker 7] build index on: db40.coll40 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db40.coll40" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.137-0400 m31101| 2015-07-09T14:03:34.136-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.139-0400 m31102| 2015-07-09T14:03:34.138-0400 I INDEX [repl writer worker 3] build index on: db40.coll40 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db40.coll40" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.139-0400 m31102| 2015-07-09T14:03:34.139-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.148-0400 m31102| 2015-07-09T14:03:34.148-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.155-0400 m31101| 2015-07-09T14:03:34.154-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.176-0400 m30999| 2015-07-09T14:03:34.176-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db40.coll40 using new epoch 559eb776ca4787b9985d1d1f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.231-0400 m30999| 2015-07-09T14:03:34.231-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db40.coll40: 1ms sequenceNumber: 176 version: 1|0||559eb776ca4787b9985d1d1f based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.286-0400 m30999| 2015-07-09T14:03:34.285-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db40.coll40: 0ms sequenceNumber: 177 version: 1|0||559eb776ca4787b9985d1d1f based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.288-0400 m31100| 2015-07-09T14:03:34.287-0400 I SHARDING [conn56] remotely refreshing metadata for db40.coll40 with requested shard version 1|0||559eb776ca4787b9985d1d1f, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.290-0400 m31100| 2015-07-09T14:03:34.289-0400 I SHARDING [conn56] collection db40.coll40 was previously unsharded, new metadata loaded with shard version 1|0||559eb776ca4787b9985d1d1f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.290-0400 m31100| 2015-07-09T14:03:34.289-0400 I SHARDING [conn56] collection version was loaded at version 1|0||559eb776ca4787b9985d1d1f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.290-0400 m30999| 2015-07-09T14:03:34.290-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:34.290-0400-559eb776ca4787b9985d1d20", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465014290), what: "shardCollection", ns: "db40.coll40", details: { version: "1|0||559eb776ca4787b9985d1d1f" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.346-0400 m30999| 2015-07-09T14:03:34.345-0400 I SHARDING [conn1] distributed lock 'db40.coll40/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.366-0400 m31200| 2015-07-09T14:03:34.364-0400 I INDEX [conn39] build index on: db40.coll40 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db40.coll40" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.366-0400 m31200| 2015-07-09T14:03:34.364-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.372-0400 m31200| 2015-07-09T14:03:34.372-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs
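(Annotation, not part of the captured log.) The sequence above is the standard sharding setup for a workload database: the router takes the distributed lock for db40, enables sharding, the shards build the tid_1 shard-key index, and shardCollection creates one initial chunk at epoch 559eb776ca4787b9985d1d1f. A sketch of the equivalent shell calls, assuming a mongos at localhost:30999 (hypothetical address):

var mongos = new Mongo("localhost:30999");
var admin = mongos.getDB("admin");
admin.runCommand({ enableSharding: "db40" });         // "Enabling sharding for database [db40] in config db"
mongos.getDB("db40").coll40.createIndex({ tid: 1 });  // shards log 'build index on: db40.coll40 ... name: "tid_1"'
admin.runCommand({ shardCollection: "db40.coll40", key: { tid: 1 } });  // "CMD: shardcollection"; creates 1 chunk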
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.374-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.491-0400 m31202| 2015-07-09T14:03:34.490-0400 I INDEX [repl writer worker 11] build index on: db40.coll40 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db40.coll40" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.493-0400 m31202| 2015-07-09T14:03:34.490-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.493-0400 m30999| 2015-07-09T14:03:34.491-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63452 #252 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.493-0400 m30999| 2015-07-09T14:03:34.492-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63453 #253 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.494-0400 m30998| 2015-07-09T14:03:34.492-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63455 #252 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.494-0400 m30999| 2015-07-09T14:03:34.493-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63454 #254 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.495-0400 m30999| 2015-07-09T14:03:34.493-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63458 #255 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.498-0400 m31201| 2015-07-09T14:03:34.498-0400 I INDEX [repl writer worker 1] build index on: db40.coll40 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db40.coll40" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.498-0400 m31201| 2015-07-09T14:03:34.498-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.503-0400 m30998| 2015-07-09T14:03:34.502-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63457 #253 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.508-0400 m30998| 2015-07-09T14:03:34.505-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63456 #254 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.508-0400 m30998| 2015-07-09T14:03:34.505-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63459 #255 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.510-0400 m30999| 2015-07-09T14:03:34.510-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:34.504-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.515-0400 m31202| 2015-07-09T14:03:34.515-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.516-0400 m31201| 2015-07-09T14:03:34.515-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.516-0400 m30998| 2015-07-09T14:03:34.515-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63460 #256 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.522-0400 m30999| 2015-07-09T14:03:34.522-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63461 #256 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.528-0400 setting random seed: 234988769516
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.528-0400 setting random seed: 8271450158208
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.528-0400 setting random seed: 746490182355
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.530-0400 setting random seed: 1212244150228
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.536-0400 setting random seed: 6260500680655
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.539-0400 setting random seed: 9456251095980
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.539-0400 setting random seed: 4187442380934
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.540-0400 setting random seed: 8262873743660
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.543-0400 setting random seed: 8344129142351
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.545-0400 setting random seed: 9778560800477
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.546-0400 m30998| 2015-07-09T14:03:34.546-0400 I SHARDING [conn252] ChunkManager: time to load chunks for db40.coll40: 0ms sequenceNumber: 49 version: 1|0||559eb776ca4787b9985d1d1f based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.631-0400 m31100| 2015-07-09T14:03:34.630-0400 I SHARDING [conn39] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.633-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.633-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.634-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.634-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.634-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.650-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.650-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.655-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.655-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.655-0400 m31100| 2015-07-09T14:03:34.632-0400 W SHARDING [conn39] possible low cardinality key detected in db40.coll40 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.655-0400 m31100| 2015-07-09T14:03:34.632-0400 I SHARDING [conn36] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.656-0400 m31100| 2015-07-09T14:03:34.633-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.656-0400 m31100| 2015-07-09T14:03:34.634-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.656-0400 m31100| 2015-07-09T14:03:34.636-0400 I SHARDING [conn15] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.656-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.656-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.657-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.657-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.657-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.657-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.657-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.657-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.657-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.658-0400 m31100| 2015-07-09T14:03:34.637-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.658-0400 m31100| 2015-07-09T14:03:34.637-0400 I SHARDING [conn40] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.658-0400 m31100| 2015-07-09T14:03:34.638-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.658-0400 m31100| 2015-07-09T14:03:34.639-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.659-0400 m31100| 2015-07-09T14:03:34.640-0400 W SHARDING [conn40] could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db40.coll40 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.659-0400 m31100| 2015-07-09T14:03:34.645-0400 I SHARDING [conn132] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.659-0400 m30999| 2015-07-09T14:03:34.640-0400 W SHARDING [conn255] splitChunk failed - cmd: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.661-0400 m31100| 2015-07-09T14:03:34.646-0400 W SHARDING [conn36] could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db40.coll40 is taken. 
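The "possible low cardinality key detected" warnings above come from the split-point lookup noticing that many documents share a single shard-key value: the workload runs ten worker threads (see "Using 10 threads (requested 10)" above) and stamps each document with its thread id, so the shard key { tid: 1 } only ever takes ten distinct values, and a chunk holding a single tid value can never be split further. A minimal shell check, assuming a connection through either mongos (the collection name db40.coll40 is taken from the log above):

    // Count the distinct shard-key values; with ten FSM worker threads this
    // should print 10, which is what triggers the low-cardinality warning.
    var tids = db.getSiblingDB("db40").coll40.distinct("tid");
    print("distinct shard-key values: " + tids.length);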
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.661-0400 m30998| 2015-07-09T14:03:34.646-0400 W SHARDING [conn252] splitChunk failed - cmd: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.661-0400 m31100| 2015-07-09T14:03:34.646-0400 I SHARDING [conn39] distributed lock 'db40.coll40/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb776792e00bb672749bb [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.661-0400 m31100| 2015-07-09T14:03:34.646-0400 I SHARDING [conn39] remotely refreshing metadata for db40.coll40 based on current shard version 1|0||559eb776ca4787b9985d1d1f, current metadata version is 1|0||559eb776ca4787b9985d1d1f [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.662-0400 m31100| 2015-07-09T14:03:34.647-0400 W SHARDING [conn15] could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db40.coll40 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.662-0400 m31100| 2015-07-09T14:03:34.647-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.662-0400 m30999| 2015-07-09T14:03:34.647-0400 W SHARDING [conn254] splitChunk failed - cmd: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.663-0400 m31100| 2015-07-09T14:03:34.648-0400 W SHARDING [conn132] could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db40.coll40 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.663-0400 m30998| 2015-07-09T14:03:34.648-0400 W SHARDING [conn256] splitChunk failed - cmd: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.673-0400 m31100| 2015-07-09T14:03:34.672-0400 I SHARDING [conn39] metadata of collection db40.coll40 already up to date (shard version : 1|0||559eb776ca4787b9985d1d1f, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.674-0400 m31100| 2015-07-09T14:03:34.672-0400 I COMMAND [conn23] command db40.$cmd command: insert { insert: "coll40", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb776ca4787b9985d1d1f') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 122ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.679-0400 m31100| 2015-07-09T14:03:34.672-0400 I SHARDING [conn39] splitChunk accepted at version 1|0||559eb776ca4787b9985d1d1f [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.679-0400 m31100| 2015-07-09T14:03:34.673-0400 I SHARDING [conn15] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.680-0400 m31100| 2015-07-09T14:03:34.674-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.680-0400 m31100| 2015-07-09T14:03:34.674-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.680-0400 m31100| 2015-07-09T14:03:34.674-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.680-0400 m31100| 2015-07-09T14:03:34.674-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.681-0400 m31100| 2015-07-09T14:03:34.675-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.681-0400 m31100| 2015-07-09T14:03:34.675-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.681-0400 m31100| 2015-07-09T14:03:34.675-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.681-0400 m31100| 2015-07-09T14:03:34.675-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:03:34.681-0400 m31100| 2015-07-09T14:03:34.675-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.681-0400 m31100| 2015-07-09T14:03:34.675-0400 W SHARDING [conn15] possible low cardinality key detected in db40.coll40 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.682-0400 m31100| 2015-07-09T14:03:34.679-0400 I COMMAND [conn70] command db40.$cmd command: insert { insert: "coll40", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb776ca4787b9985d1d1f') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 21999 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.685-0400 m31100| 2015-07-09T14:03:34.685-0400 I COMMAND [conn146] command db40.$cmd command: insert { insert: "coll40", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb776ca4787b9985d1d1f') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 176 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.686-0400 m31100| 2015-07-09T14:03:34.685-0400 I SHARDING [conn40] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.686-0400 m31100| 2015-07-09T14:03:34.685-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.688-0400 m31100| 2015-07-09T14:03:34.687-0400 W SHARDING [conn15] could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db40.coll40 is taken. 
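The "I COMMAND ... protocol:op_command 122ms" records above are mongod's slow-operation log lines: each is a 100-document batch insert from one worker, reported because it exceeded the slow-op threshold (100ms by default), and the locks breakdown shows one Metadata and one oplog write acquisition per inserted document. A minimal sketch for adjusting that threshold, assuming a shell connected directly to one of the shard mongods (e.g. port 31100; the setting is per-mongod, not settable through mongos):

    // Profiler stays off (level 0); only the slow-op log threshold changes.
    db.setProfilingLevel(0, 50);          // log operations slower than 50ms
    printjson(db.getProfilingStatus());   // e.g. { "was" : 0, "slowms" : 50 }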
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.689-0400 m30999| 2015-07-09T14:03:34.687-0400 W SHARDING [conn256] splitChunk failed - cmd: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.697-0400 m30999| 2015-07-09T14:03:34.694-0400 I SHARDING [conn256] ChunkManager: time to load chunks for db40.coll40: 0ms sequenceNumber: 178 version: 1|10||559eb776ca4787b9985d1d1f based on: 1|0||559eb776ca4787b9985d1d1f [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.701-0400 m31100| 2015-07-09T14:03:34.699-0400 I SHARDING [conn132] request split points lookup for chunk db40.coll40 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.749-0400 m31100| 2015-07-09T14:03:34.748-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:34.748-0400-559eb776792e00bb672749bc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465014748), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 10, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.750-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn40] possible low cardinality key detected in db40.coll40 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.750-0400 m31100| 2015-07-09T14:03:34.749-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.750-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.750-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.751-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.751-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.751-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.751-0400 m31100| 
2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.751-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.752-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.752-0400 m31100| 2015-07-09T14:03:34.749-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.752-0400 m31100| 2015-07-09T14:03:34.750-0400 W SHARDING [conn132] possible low cardinality key detected in db40.coll40 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.752-0400 m31100| 2015-07-09T14:03:34.750-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.753-0400 m31100| 2015-07-09T14:03:34.750-0400 W SHARDING [conn40] could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db40.coll40 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.753-0400 m30999| 2015-07-09T14:03:34.751-0400 W SHARDING [conn252] splitChunk failed - cmd: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.753-0400 m31100| 2015-07-09T14:03:34.751-0400 W SHARDING [conn132] could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db40.coll40 is taken. 
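The burst of "could not acquire collection lock ... code: 125" warnings above, and the matching "splitChunk failed" lines on m30998/m30999, is a benign race rather than a workload error: the ten workers insert through two mongos routers, each of which independently asks the primary to autosplit the same [{ tid: MinKey }, { tid: MaxKey }) chunk, and only the request that wins the distributed lock (conn39, ts 559eb776792e00bb672749bb above) performs the split while the losers back off and retry on a later write. A hedged way to watch the lock itself, assuming a shell connected through a mongos:

    // Distributed locks live in the config database; state 2 means held.
    db.getSiblingDB("config").locks.find({ _id: "db40.coll40" }).forEach(printjson);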
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.754-0400 m30998| 2015-07-09T14:03:34.751-0400 W SHARDING [conn254] splitChunk failed - cmd: { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db40.coll40 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.757-0400 m30998| 2015-07-09T14:03:34.756-0400 I SHARDING [conn254] ChunkManager: time to load chunks for db40.coll40: 0ms sequenceNumber: 50 version: 1|10||559eb776ca4787b9985d1d1f based on: 1|0||559eb776ca4787b9985d1d1f [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.785-0400 m31100| 2015-07-09T14:03:34.785-0400 I COMMAND [conn27] command db40.$cmd command: insert { insert: "coll40", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb776ca4787b9985d1d1f') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 63222 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.801-0400 m31100| 2015-07-09T14:03:34.799-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:34.799-0400-559eb776792e00bb672749bd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465014799), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 10, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.852-0400 m31100| 2015-07-09T14:03:34.852-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:34.852-0400-559eb776792e00bb672749be", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465014852), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 10, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.907-0400 m31100| 2015-07-09T14:03:34.906-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:34.906-0400-559eb776792e00bb672749bf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465014906), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 10, chunk: { min: { tid: 3.0 }, max: { tid: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:34.959-0400 m31100| 2015-07-09T14:03:34.959-0400 I SHARDING [conn39] about to log metadata event: { 
_id: "bs-osx108-8-2015-07-09T14:03:34.959-0400-559eb776792e00bb672749c0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465014959), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 10, chunk: { min: { tid: 4.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.012-0400 m31100| 2015-07-09T14:03:35.012-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:35.012-0400-559eb777792e00bb672749c1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465015012), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 10, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.066-0400 m31100| 2015-07-09T14:03:35.065-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:35.065-0400-559eb777792e00bb672749c2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465015065), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 10, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.119-0400 m31100| 2015-07-09T14:03:35.118-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:35.118-0400-559eb777792e00bb672749c3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465015118), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 10, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.172-0400 m31100| 2015-07-09T14:03:35.171-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:35.171-0400-559eb777792e00bb672749c4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465015171), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 10, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.225-0400 m31100| 2015-07-09T14:03:35.224-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:03:35.224-0400-559eb777792e00bb672749c5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465015224), what: "multi-split", ns: "db40.coll40", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 10, of: 10, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eb776ca4787b9985d1d1f') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.279-0400 m31100| 2015-07-09T14:03:35.278-0400 I SHARDING [conn39] distributed lock 'db40.coll40/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.280-0400 m31100| 2015-07-09T14:03:35.279-0400 I COMMAND [conn39] command db40.coll40 command: splitChunk { splitChunk: "db40.coll40", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb776ca4787b9985d1d1f') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 97753 } } } protocol:op_command 645ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.280-0400 m30998| 2015-07-09T14:03:35.280-0400 I SHARDING [conn255] autosplitted db40.coll40 shard: ns: db40.coll40, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 10 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:35.926-0400 m30998| 2015-07-09T14:03:35.926-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:35.918-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:36.825-0400 m31100| 2015-07-09T14:03:36.825-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:36.823-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:37.249-0400 m31200| 2015-07-09T14:03:37.248-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:03:37.246-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:52.683-0400 m31100| 2015-07-09T14:03:52.682-0400 I QUERY [conn72] query db40.coll40 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:2810174289375 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:994 keyUpdates:0 writeConflicts:0 numYields:662 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1330 } }, Database: { acquireCount: { r: 665 } }, Collection: { acquireCount: { r: 665 } } } 17866ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:53.514-0400 m31100| 2015-07-09T14:03:53.513-0400 I QUERY [conn45] query db40.coll40 query: { $where: "this.tid === 0" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1200 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:699 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 1404 } }, Database: { acquireCount: { r: 702 } }, Collection: { acquireCount: { r: 702 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 56564 } } } 18839ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:55.252-0400 m31100| 2015-07-09T14:03:55.251-0400 I QUERY [conn71] query db40.coll40 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2809024866057 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1101 keyUpdates:0 writeConflicts:0 numYields:735 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1476 } }, 
Database: { acquireCount: { r: 738 } }, Collection: { acquireCount: { r: 738 } } } 19912ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:55.357-0400 m31100| 2015-07-09T14:03:55.356-0400 I QUERY [conn48] query db40.coll40 query: { $where: "this.tid === 5" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1300 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:765 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 1536 } }, Database: { acquireCount: { r: 768 } }, Collection: { acquireCount: { r: 768 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 41228 } } } 20674ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:03:57.317-0400 m31100| 2015-07-09T14:03:57.316-0400 I QUERY [conn47] query db40.coll40 query: { $where: "this.tid === 2" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1400 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:839 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 1684 } }, Database: { acquireCount: { r: 842 } }, Collection: { acquireCount: { r: 842 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 40093 } } } 22641ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:00.159-0400 m31100| 2015-07-09T14:04:00.158-0400 I QUERY [conn46] query db40.coll40 query: { $where: "this.tid === 7" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1500 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:945 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 1896 } }, Database: { acquireCount: { r: 948 } }, Collection: { acquireCount: { r: 948 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 9916 } } } 25459ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:02.008-0400 m31100| 2015-07-09T14:04:02.008-0400 I QUERY [conn56] query db40.coll40 query: { $where: "this.tid === 4" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1600 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1012 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 2030 } }, Database: { acquireCount: { r: 1015 } }, Collection: { acquireCount: { r: 1015 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 20126 } } } 27335ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:04.512-0400 m30999| 2015-07-09T14:04:04.512-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:04.509-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:04.881-0400 m31100| 2015-07-09T14:04:04.880-0400 I QUERY [conn59] query db40.coll40 query: { $where: "this.tid === 6" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1700 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1118 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 2242 } }, Database: { acquireCount: { r: 1121 } }, Collection: { acquireCount: { r: 1121 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 25493 } } } 30130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:05.263-0400 m31100| 2015-07-09T14:04:05.262-0400 I QUERY [conn49] query db40.coll40 query: { $where: "this.tid === 3" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1700 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1129 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 2264 } }, Database: { acquireCount: { r: 
1132 } }, Collection: { acquireCount: { r: 1132 } } } 30481ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:05.344-0400 m31100| 2015-07-09T14:04:05.343-0400 I QUERY [conn134] getmore db40.coll40 query: { $where: "this.tid === 1" } cursorid:2810174289375 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:465 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 932 } }, Database: { acquireCount: { r: 466 } }, Collection: { acquireCount: { r: 466 } } } 12658ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:05.929-0400 m30998| 2015-07-09T14:04:05.929-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:05.926-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:06.828-0400 m31100| 2015-07-09T14:04:06.827-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:06.825-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:07.251-0400 m31200| 2015-07-09T14:04:07.251-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:07.248-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:08.762-0400 m31100| 2015-07-09T14:04:08.762-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 8" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1900 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1264 nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 2534 } }, Database: { acquireCount: { r: 1267 } }, Collection: { acquireCount: { r: 1267 } } } 33975ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:11.394-0400 m31100| 2015-07-09T14:04:11.394-0400 I QUERY [conn140] getmore db40.coll40 query: { $where: "this.tid === 9" } cursorid:2809024866057 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:603 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1208 } }, Database: { acquireCount: { r: 604 } }, Collection: { acquireCount: { r: 604 } } } 16140ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:15.214-0400 m31100| 2015-07-09T14:04:15.214-0400 I QUERY [conn45] query db40.coll40 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:2809328191989 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1201 keyUpdates:0 writeConflicts:0 numYields:819 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1640 } }, Database: { acquireCount: { r: 820 } }, Collection: { acquireCount: { r: 820 } } } 21671ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:18.868-0400 m31100| 2015-07-09T14:04:18.867-0400 I QUERY [conn48] query db40.coll40 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:2810317052028 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1301 keyUpdates:0 writeConflicts:0 numYields:886 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1774 } }, Database: { acquireCount: { r: 887 } }, Collection: { acquireCount: { r: 887 } } } 23475ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:22.871-0400 m31100| 2015-07-09T14:04:22.870-0400 I QUERY [conn47] query db40.coll40 query: { 
$where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2810567688592 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1401 keyUpdates:0 writeConflicts:0 numYields:967 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1936 } }, Database: { acquireCount: { r: 968 } }, Collection: { acquireCount: { r: 968 } } } 25520ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:23.829-0400 m31100| 2015-07-09T14:04:23.828-0400 I QUERY [conn71] query db40.coll40 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:2809099450218 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:994 keyUpdates:0 writeConflicts:0 numYields:708 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1418 } }, Database: { acquireCount: { r: 709 } }, Collection: { acquireCount: { r: 709 } } } 18458ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:27.707-0400 m31100| 2015-07-09T14:04:27.707-0400 I QUERY [conn46] query db40.coll40 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2808910273045 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1501 keyUpdates:0 writeConflicts:0 numYields:1058 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2118 } }, Database: { acquireCount: { r: 1059 } }, Collection: { acquireCount: { r: 1059 } } } 27520ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:31.638-0400 m31100| 2015-07-09T14:04:31.637-0400 I QUERY [conn56] query db40.coll40 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2809795875474 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1601 keyUpdates:0 writeConflicts:0 numYields:1149 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2300 } }, Database: { acquireCount: { r: 1150 } }, Collection: { acquireCount: { r: 1150 } } } 29604ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:32.023-0400 m31100| 2015-07-09T14:04:32.022-0400 I QUERY [conn72] query db40.coll40 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2810157693599 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1101 keyUpdates:0 writeConflicts:0 numYields:799 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1600 } }, Database: { acquireCount: { r: 800 } }, Collection: { acquireCount: { r: 800 } } } 20596ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:32.080-0400 m31100| 2015-07-09T14:04:32.080-0400 I QUERY [conn43] getmore db40.coll40 query: { $where: "this.tid === 0" } cursorid:2809328191989 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:656 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1314 } }, Database: { acquireCount: { r: 657 } }, Collection: { acquireCount: { r: 657 } } } 16862ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:34.059-0400 m31100| 2015-07-09T14:04:34.058-0400 I QUERY [conn140] getmore db40.coll40 query: { $where: "this.tid === 5" } cursorid:2810317052028 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:596 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1194 } }, Database: { acquireCount: { r: 597 } }, Collection: { acquireCount: { r: 597 } } } 15187ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:34.514-0400 m30999| 2015-07-09T14:04:34.514-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:34.512-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:35.932-0400 m30998| 
2015-07-09T14:04:35.932-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:35.929-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:36.829-0400 m31100| 2015-07-09T14:04:36.829-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:36.827-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:36.953-0400 m31100| 2015-07-09T14:04:36.952-0400 I QUERY [conn49] query db40.coll40 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2810005631004 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1701 keyUpdates:0 writeConflicts:0 numYields:1230 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2462 } }, Database: { acquireCount: { r: 1231 } }, Collection: { acquireCount: { r: 1231 } } } 31612ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:37.254-0400 m31200| 2015-07-09T14:04:37.253-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:04:37.250-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:39.774-0400 m31100| 2015-07-09T14:04:39.773-0400 I QUERY [conn86] getmore db40.coll40 query: { $where: "this.tid === 2" } cursorid:2810567688592 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:659 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1320 } }, Database: { acquireCount: { r: 660 } }, Collection: { acquireCount: { r: 660 } } } 16900ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:44.427-0400 m31100| 2015-07-09T14:04:44.426-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:2810861061079 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1901 keyUpdates:0 writeConflicts:0 numYields:1385 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2772 } }, Database: { acquireCount: { r: 1386 } }, Collection: { acquireCount: { r: 1386 } } } 35632ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:44.586-0400 m31100| 2015-07-09T14:04:44.585-0400 I QUERY [conn139] getmore db40.coll40 query: { $where: "this.tid === 7" } cursorid:2808910273045 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:651 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1304 } }, Database: { acquireCount: { r: 652 } }, Collection: { acquireCount: { r: 652 } } } 16876ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:48.452-0400 m31100| 2015-07-09T14:04:48.451-0400 I QUERY [conn143] getmore db40.coll40 query: { $where: "this.tid === 4" } cursorid:2809795875474 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:653 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1308 } }, Database: { acquireCount: { r: 654 } }, Collection: { acquireCount: { r: 654 } } } 16810ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:51.482-0400 m31100| 2015-07-09T14:04:51.482-0400 I QUERY [conn59] query db40.coll40 query: { $where: "this.tid === 6" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2500 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1811 
nreturned:100 reslen:3520 locks:{ Global: { acquireCount: { r: 3624 } }, Database: { acquireCount: { r: 1812 } }, Collection: { acquireCount: { r: 1812 } } } 46594ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:54.695-0400 m31100| 2015-07-09T14:04:54.695-0400 I QUERY [conn56] query db40.coll40 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:2810321467851 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1201 keyUpdates:0 writeConflicts:0 numYields:879 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1760 } }, Database: { acquireCount: { r: 880 } }, Collection: { acquireCount: { r: 880 } } } 22606ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:55.961-0400 m31100| 2015-07-09T14:04:55.961-0400 I QUERY [conn136] getmore db40.coll40 query: { $where: "this.tid === 3" } cursorid:2810005631004 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:746 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1494 } }, Database: { acquireCount: { r: 747 } }, Collection: { acquireCount: { r: 747 } } } 19004ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:55.984-0400 m31100| 2015-07-09T14:04:55.984-0400 I QUERY [conn134] getmore db40.coll40 query: { $where: "this.tid === 1" } cursorid:2809099450218 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1256 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 2514 } }, Database: { acquireCount: { r: 1257 } }, Collection: { acquireCount: { r: 1257 } } } 32152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:04:58.814-0400 m31100| 2015-07-09T14:04:58.814-0400 I QUERY [conn72] query db40.coll40 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:2810959648336 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1301 keyUpdates:0 writeConflicts:0 numYields:960 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1922 } }, Database: { acquireCount: { r: 961 } }, Collection: { acquireCount: { r: 961 } } } 24701ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:03.782-0400 m31100| 2015-07-09T14:05:03.782-0400 I QUERY [conn86] getmore db40.coll40 query: { $where: "this.tid === 8" } cursorid:2810861061079 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:773 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 1548 } }, Database: { acquireCount: { r: 774 } }, Collection: { acquireCount: { r: 774 } } } 19351ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:04.517-0400 m30999| 2015-07-09T14:05:04.517-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:04.514-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:05.934-0400 m30998| 2015-07-09T14:05:05.934-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:05.931-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:06.831-0400 m31100| 2015-07-09T14:05:06.830-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:06.829-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:06.886-0400 m31100| 
2015-07-09T14:05:06.886-0400 I QUERY [conn55] query db40.coll40 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2809495513812 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1401 keyUpdates:0 writeConflicts:0 numYields:1078 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2158 } }, Database: { acquireCount: { r: 1079 } }, Collection: { acquireCount: { r: 1079 } } } 27083ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:07.257-0400 m31200| 2015-07-09T14:05:07.256-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:07.253-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:08.662-0400 m31100| 2015-07-09T14:05:08.662-0400 I QUERY [conn42] getmore db40.coll40 query: { $where: "this.tid === 9" } cursorid:2810157693599 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1445 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 2892 } }, Database: { acquireCount: { r: 1446 } }, Collection: { acquireCount: { r: 1446 } } } 36635ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:14.152-0400 m31100| 2015-07-09T14:05:14.151-0400 I QUERY [conn49] query db40.coll40 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2810367290343 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1501 keyUpdates:0 writeConflicts:0 numYields:1198 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2398 } }, Database: { acquireCount: { r: 1199 } }, Collection: { acquireCount: { r: 1199 } } } 29541ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:16.122-0400 m31100| 2015-07-09T14:05:16.121-0400 I QUERY [conn58] query db40.coll40 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:2810232862931 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:994 keyUpdates:0 writeConflicts:0 numYields:828 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1658 } }, Database: { acquireCount: { r: 829 } }, Collection: { acquireCount: { r: 829 } } } 20108ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:20.413-0400 m31100| 2015-07-09T14:05:20.412-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2809821156744 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1601 keyUpdates:0 writeConflicts:0 numYields:1300 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2602 } }, Database: { acquireCount: { r: 1301 } }, Collection: { acquireCount: { r: 1301 } } } 31953ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:30.492-0400 m31100| 2015-07-09T14:05:30.491-0400 I QUERY [conn73] query db40.coll40 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2809717077082 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1701 keyUpdates:0 writeConflicts:0 numYields:1414 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2830 } }, Database: { acquireCount: { r: 1415 } }, Collection: { acquireCount: { r: 1415 } } } 34498ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:31.234-0400 m31100| 2015-07-09T14:05:31.233-0400 I QUERY [conn72] query db40.coll40 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2809248681768 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1101 keyUpdates:0 writeConflicts:0 numYields:918 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1838 } }, Database: { acquireCount: { r: 919 
} }, Collection: { acquireCount: { r: 919 } } } 22522ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:33.145-0400 m31100| 2015-07-09T14:05:33.145-0400 I QUERY [conn143] getmore db40.coll40 query: { $where: "this.tid === 0" } cursorid:2810321467851 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1568 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 3138 } }, Database: { acquireCount: { r: 1569 } }, Collection: { acquireCount: { r: 1569 } } } 38448ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:34.520-0400 m30999| 2015-07-09T14:05:34.519-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:34.517-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:35.936-0400 m30998| 2015-07-09T14:05:35.936-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:35.934-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:36.833-0400 m31100| 2015-07-09T14:05:36.833-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:36.831-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:37.235-0400 m31100| 2015-07-09T14:05:37.234-0400 I QUERY [conn134] getmore db40.coll40 query: { $where: "this.tid === 5" } cursorid:2810959648336 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1571 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 3144 } }, Database: { acquireCount: { r: 1572 } }, Collection: { acquireCount: { r: 1572 } } } 38418ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:37.259-0400 m31200| 2015-07-09T14:05:37.258-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:05:37.257-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:42.016-0400 m31100| 2015-07-09T14:05:42.015-0400 I QUERY [conn59] query db40.coll40 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:2809601770899 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2501 keyUpdates:0 writeConflicts:0 numYields:2067 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 4136 } }, Database: { acquireCount: { r: 2068 } }, Collection: { acquireCount: { r: 2068 } } } 50482ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:42.426-0400 m31100| 2015-07-09T14:05:42.425-0400 I QUERY [conn56] query db40.coll40 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:2809941204260 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1901 keyUpdates:0 writeConflicts:0 numYields:1568 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 3138 } }, Database: { acquireCount: { r: 1569 } }, Collection: { acquireCount: { r: 1569 } } } 38613ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:45.489-0400 m31100| 2015-07-09T14:05:45.488-0400 I QUERY [conn86] getmore db40.coll40 query: { $where: "this.tid === 2" } cursorid:2809495513812 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1576 
nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 3154 } }, Database: { acquireCount: { r: 1577 } }, Collection: { acquireCount: { r: 1577 } } } 38600ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:50.804-0400 m31100| 2015-07-09T14:05:50.804-0400 I QUERY [conn42] getmore db40.coll40 query: { $where: "this.tid === 7" } cursorid:2810367290343 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1491 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 2984 } }, Database: { acquireCount: { r: 1492 } }, Collection: { acquireCount: { r: 1492 } } } 36650ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:56.579-0400 m31100| 2015-07-09T14:05:56.578-0400 I QUERY [conn135] getmore db40.coll40 query: { $where: "this.tid === 4" } cursorid:2809821156744 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1476 nreturned:99 reslen:3485 locks:{ Global: { acquireCount: { r: 2954 } }, Database: { acquireCount: { r: 1477 } }, Collection: { acquireCount: { r: 1477 } } } 36164ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:05:57.132-0400 m31100| 2015-07-09T14:05:57.131-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:2810849487915 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1201 keyUpdates:0 writeConflicts:0 numYields:965 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1932 } }, Database: { acquireCount: { r: 966 } }, Collection: { acquireCount: { r: 966 } } } 23962ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:01.590-0400 m31100| 2015-07-09T14:06:01.589-0400 I QUERY [conn43] getmore db40.coll40 query: { $where: "this.tid === 6" } cursorid:2809601770899 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:777 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 1556 } }, Database: { acquireCount: { r: 778 } }, Collection: { acquireCount: { r: 778 } } } 19572ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:02.834-0400 m31100| 2015-07-09T14:06:02.833-0400 I QUERY [conn46] query db40.coll40 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:2810245484857 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1301 keyUpdates:0 writeConflicts:0 numYields:1014 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2030 } }, Database: { acquireCount: { r: 1015 } }, Collection: { acquireCount: { r: 1015 } } } 25568ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:04.522-0400 m30999| 2015-07-09T14:06:04.522-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:04.519-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:05.939-0400 m30998| 2015-07-09T14:06:05.938-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:05.936-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:06.836-0400 m31100| 2015-07-09T14:06:06.836-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:06.833-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:07.261-0400 m31200| 
2015-07-09T14:06:07.260-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:07.258-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:07.611-0400 m31100| 2015-07-09T14:06:07.610-0400 I QUERY [conn139] getmore db40.coll40 query: { $where: "this.tid === 3" } cursorid:2809717077082 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1478 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 2958 } }, Database: { acquireCount: { r: 1479 } }, Collection: { acquireCount: { r: 1479 } } } 37116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:11.522-0400 m31100| 2015-07-09T14:06:11.521-0400 I QUERY [conn136] getmore db40.coll40 query: { $where: "this.tid === 1" } cursorid:2810232862931 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2226 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 4454 } }, Database: { acquireCount: { r: 2227 } }, Collection: { acquireCount: { r: 2227 } } } 55398ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:12.380-0400 m31100| 2015-07-09T14:06:12.379-0400 I QUERY [conn56] query db40.coll40 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2809436846338 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1401 keyUpdates:0 writeConflicts:0 numYields:1052 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2106 } }, Database: { acquireCount: { r: 1053 } }, Collection: { acquireCount: { r: 1053 } } } 26881ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:19.195-0400 m31100| 2015-07-09T14:06:19.194-0400 I QUERY [conn142] getmore db40.coll40 query: { $where: "this.tid === 8" } cursorid:2809941204260 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1447 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 2896 } }, Database: { acquireCount: { r: 1448 } }, Collection: { acquireCount: { r: 1448 } } } 36767ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:19.269-0400 m31100| 2015-07-09T14:06:19.268-0400 I QUERY [conn71] query db40.coll40 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2810020389620 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1501 keyUpdates:0 writeConflicts:0 numYields:1108 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2218 } }, Database: { acquireCount: { r: 1109 } }, Collection: { acquireCount: { r: 1109 } } } 28438ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:26.364-0400 m31100| 2015-07-09T14:06:26.364-0400 I QUERY [conn59] query db40.coll40 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2810158682275 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1601 keyUpdates:0 writeConflicts:0 numYields:1137 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2276 } }, Database: { acquireCount: { r: 1138 } }, Collection: { acquireCount: { r: 1138 } } } 29761ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:26.886-0400 m31100| 2015-07-09T14:06:26.886-0400 I QUERY [conn137] getmore db40.coll40 query: { $where: "this.tid === 9" } cursorid:2809248681768 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2190 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 4382 } }, Database: { acquireCount: { r: 2191 } }, Collection: { acquireCount: { r: 2191 } } } 55651ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:06:29.802-0400 m31100| 2015-07-09T14:06:29.802-0400 I QUERY [conn48] query db40.coll40 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:2811015423507 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:994 keyUpdates:0 writeConflicts:0 numYields:690 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1382 } }, Database: { acquireCount: { r: 691 } }, Collection: { acquireCount: { r: 691 } } } 18271ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:34.525-0400 m30999| 2015-07-09T14:06:34.524-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:34.521-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:35.941-0400 m30998| 2015-07-09T14:06:35.940-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:35.938-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:36.838-0400 m31100| 2015-07-09T14:06:36.837-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:36.835-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:37.263-0400 m31200| 2015-07-09T14:06:37.263-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:06:37.260-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:40.081-0400 m31100| 2015-07-09T14:06:40.080-0400 I QUERY [conn46] query db40.coll40 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2811003270282 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1701 keyUpdates:0 writeConflicts:0 numYields:1260 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2522 } }, Database: { acquireCount: { r: 1261 } }, Collection: { acquireCount: { r: 1261 } } } 32411ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:48.916-0400 m31100| 2015-07-09T14:06:48.915-0400 I QUERY [conn71] query db40.coll40 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2809410578983 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1101 keyUpdates:0 writeConflicts:0 numYields:890 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1782 } }, Database: { acquireCount: { r: 891 } }, Collection: { acquireCount: { r: 891 } } } 22004ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:49.635-0400 m31100| 2015-07-09T14:06:49.635-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:2810044409521 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2501 keyUpdates:0 writeConflicts:0 numYields:1889 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 3780 } }, Database: { acquireCount: { r: 1890 } }, Collection: { acquireCount: { r: 1890 } } } 48012ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:53.112-0400 m31100| 2015-07-09T14:06:53.112-0400 I QUERY [conn135] getmore db40.coll40 query: { $where: "this.tid === 0" } cursorid:2810849487915 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2209 nreturned:199 
reslen:6985 locks:{ Global: { acquireCount: { r: 4420 } }, Database: { acquireCount: { r: 2210 } }, Collection: { acquireCount: { r: 2210 } } } 55977ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:56.783-0400 m31100| 2015-07-09T14:06:56.782-0400 I QUERY [conn56] query db40.coll40 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:2810434794282 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1901 keyUpdates:0 writeConflicts:0 numYields:1487 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2976 } }, Database: { acquireCount: { r: 1488 } }, Collection: { acquireCount: { r: 1488 } } } 37502ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:59.417-0400 m31100| 2015-07-09T14:06:59.417-0400 I QUERY [conn42] getmore db40.coll40 query: { $where: "this.tid === 5" } cursorid:2810245484857 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2250 nreturned:399 reslen:13985 locks:{ Global: { acquireCount: { r: 4502 } }, Database: { acquireCount: { r: 2251 } }, Collection: { acquireCount: { r: 2251 } } } 56581ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:06:59.453-0400 m30998| 2015-07-09T14:06:59.452-0400 I NETWORK [conn253] end connection 127.0.0.1:63457 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:04.527-0400 m30999| 2015-07-09T14:07:04.526-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:04.524-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:05.943-0400 m30998| 2015-07-09T14:07:05.943-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:05.940-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:06.840-0400 m31100| 2015-07-09T14:07:06.840-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:06.837-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:07.265-0400 m31200| 2015-07-09T14:07:07.265-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:07.262-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:08.521-0400 m31100| 2015-07-09T14:07:08.521-0400 I QUERY [conn43] getmore db40.coll40 query: { $where: "this.tid === 2" } cursorid:2809436846338 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2223 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 4448 } }, Database: { acquireCount: { r: 2224 } }, Collection: { acquireCount: { r: 2224 } } } 56139ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:16.028-0400 m31100| 2015-07-09T14:07:16.028-0400 I QUERY [conn136] getmore db40.coll40 query: { $where: "this.tid === 7" } cursorid:2810020389620 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2241 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 4484 } }, Database: { acquireCount: { r: 2242 } }, Collection: { acquireCount: { r: 2242 } } } 56758ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:07:16.664-0400 m31100| 2015-07-09T14:07:16.663-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:2809281470810 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1201 keyUpdates:0 writeConflicts:0 numYields:939 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 1880 } }, Database: { acquireCount: { r: 940 } }, Collection: { acquireCount: { r: 940 } } } 23528ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:24.697-0400 m31100| 2015-07-09T14:07:24.696-0400 I QUERY [conn142] getmore db40.coll40 query: { $where: "this.tid === 4" } cursorid:2810158682275 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2340 nreturned:199 reslen:6985 locks:{ Global: { acquireCount: { r: 4682 } }, Database: { acquireCount: { r: 2341 } }, Collection: { acquireCount: { r: 2341 } } } 58331ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:32.786-0400 m31100| 2015-07-09T14:07:32.785-0400 I QUERY [conn143] getmore db40.coll40 query: { $where: "this.tid === 6" } cursorid:2810044409521 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1742 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 3486 } }, Database: { acquireCount: { r: 1743 } }, Collection: { acquireCount: { r: 1743 } } } 43148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:32.831-0400 m30999| 2015-07-09T14:07:32.830-0400 I NETWORK [conn256] end connection 127.0.0.1:63461 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:34.530-0400 m30999| 2015-07-09T14:07:34.530-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:34.526-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:35.945-0400 m30998| 2015-07-09T14:07:35.945-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:35.943-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:36.841-0400 m31100| 2015-07-09T14:07:36.841-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:36.839-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:37.268-0400 m31200| 2015-07-09T14:07:37.268-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:07:37.265-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:38.419-0400 m31100| 2015-07-09T14:07:38.419-0400 I QUERY [conn56] query db40.coll40 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2810263735272 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1401 keyUpdates:0 writeConflicts:0 numYields:1215 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2432 } }, Database: { acquireCount: { r: 1216 } }, Collection: { acquireCount: { r: 1216 } } } 29874ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:43.812-0400 m31100| 2015-07-09T14:07:43.812-0400 I QUERY [conn74] getmore db40.coll40 query: { $where: "this.tid === 3" } cursorid:2811003270282 
ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2596 nreturned:399 reslen:13985 locks:{ Global: { acquireCount: { r: 5194 } }, Database: { acquireCount: { r: 2597 } }, Collection: { acquireCount: { r: 2597 } } } 63729ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:43.836-0400 m30998| 2015-07-09T14:07:43.836-0400 I NETWORK [conn254] end connection 127.0.0.1:63456 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:47.545-0400 m31100| 2015-07-09T14:07:47.545-0400 I QUERY [conn71] query db40.coll40 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2810982066223 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1501 keyUpdates:0 writeConflicts:0 numYields:1294 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2590 } }, Database: { acquireCount: { r: 1295 } }, Collection: { acquireCount: { r: 1295 } } } 31490ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:49.284-0400 m31100| 2015-07-09T14:07:49.284-0400 I QUERY [conn137] getmore db40.coll40 query: { $where: "this.tid === 1" } cursorid:2811015423507 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3220 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 6442 } }, Database: { acquireCount: { r: 3221 } }, Collection: { acquireCount: { r: 3221 } } } 79480ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:49.332-0400 m30998| 2015-07-09T14:07:49.332-0400 I NETWORK [conn252] end connection 127.0.0.1:63455 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:55.159-0400 m31100| 2015-07-09T14:07:55.158-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2810571647636 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1601 keyUpdates:0 writeConflicts:0 numYields:1224 nreturned:101 reslen:3555 locks:{ Global: { acquireCount: { r: 2450 } }, Database: { acquireCount: { r: 1225 } }, Collection: { acquireCount: { r: 1225 } } } 30437ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:58.109-0400 m31100| 2015-07-09T14:07:58.108-0400 I QUERY [conn135] getmore db40.coll40 query: { $where: "this.tid === 8" } cursorid:2810434794282 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2464 nreturned:399 reslen:13985 locks:{ Global: { acquireCount: { r: 4930 } }, Database: { acquireCount: { r: 2465 } }, Collection: { acquireCount: { r: 2465 } } } 61323ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:07:58.136-0400 m30999| 2015-07-09T14:07:58.135-0400 I NETWORK [conn252] end connection 127.0.0.1:63452 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:04.532-0400 m30999| 2015-07-09T14:08:04.532-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:04.529-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:04.646-0400 m31100| 2015-07-09T14:08:04.646-0400 I QUERY [conn138] getmore db40.coll40 query: { $where: "this.tid === 9" } cursorid:2809410578983 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:3050 nreturned:399 reslen:13985 locks:{ Global: { acquireCount: { r: 6102 } }, Database: { acquireCount: { r: 3051 } }, Collection: { acquireCount: { r: 3051 } } } 75728ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:04.679-0400 m30998| 2015-07-09T14:08:04.678-0400 I NETWORK [conn255] end 
connection 127.0.0.1:63459 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:05.948-0400 m30998| 2015-07-09T14:08:05.947-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:05.945-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:06.843-0400 m31100| 2015-07-09T14:08:06.843-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:06.840-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:07.270-0400 m31200| 2015-07-09T14:08:07.270-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:07.267-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:19.011-0400 m31100| 2015-07-09T14:08:19.011-0400 I QUERY [conn43] getmore db40.coll40 query: { $where: "this.tid === 0" } cursorid:2809281470810 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2584 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 5170 } }, Database: { acquireCount: { r: 2585 } }, Collection: { acquireCount: { r: 2585 } } } 62345ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:19.037-0400 m30999| 2015-07-09T14:08:19.036-0400 I NETWORK [conn255] end connection 127.0.0.1:63458 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:25.833-0400 m31100| 2015-07-09T14:08:25.832-0400 I QUERY [conn86] getmore db40.coll40 query: { $where: "this.tid === 2" } cursorid:2810263735272 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2014 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 4030 } }, Database: { acquireCount: { r: 2015 } }, Collection: { acquireCount: { r: 2015 } } } 47411ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.330-0400 m31100| 2015-07-09T14:08:27.329-0400 I QUERY [conn74] getmore db40.coll40 query: { $where: "this.tid === 7" } cursorid:2810982066223 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1712 nreturned:399 reslen:13985 locks:{ Global: { acquireCount: { r: 3426 } }, Database: { acquireCount: { r: 1713 } }, Collection: { acquireCount: { r: 1713 } } } 39782ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.338-0400 m30998| 2015-07-09T14:08:27.337-0400 I NETWORK [conn256] end connection 127.0.0.1:63460 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.537-0400 m31100| 2015-07-09T14:08:27.536-0400 I QUERY [conn150] getmore db40.coll40 query: { $where: "this.tid === 4" } cursorid:2810571647636 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1422 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 2846 } }, Database: { acquireCount: { r: 1423 } }, Collection: { acquireCount: { r: 1423 } } } 32375ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.541-0400 m31100| 2015-07-09T14:08:27.540-0400 I QUERY [conn51] query db40.coll40 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2810014950428 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1401 keyUpdates:0 writeConflicts:0 numYields:81 nreturned:101 reslen:3555 locks:{ 
Global: { acquireCount: { r: 164 } }, Database: { acquireCount: { r: 82 } }, Collection: { acquireCount: { r: 82 } } } 1703ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.562-0400 m30999| 2015-07-09T14:08:27.562-0400 I NETWORK [conn253] end connection 127.0.0.1:63453 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.702-0400 m31100| 2015-07-09T14:08:27.702-0400 I QUERY [conn150] getmore db40.coll40 query: { $where: "this.tid === 2" } cursorid:2810014950428 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:32 nreturned:299 reslen:10485 locks:{ Global: { acquireCount: { r: 66 } }, Database: { acquireCount: { r: 33 } }, Collection: { acquireCount: { r: 33 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.712-0400 m30999| 2015-07-09T14:08:27.711-0400 I NETWORK [conn254] end connection 127.0.0.1:63454 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.732-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.732-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.732-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.732-0400 jstests/concurrency/fsm_workloads/indexed_insert_where.js: Workload completed in 293357 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.732-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.732-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.732-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.733-0400 m30999| 2015-07-09T14:08:27.732-0400 I COMMAND [conn1] DROP: db40.coll40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.733-0400 m30999| 2015-07-09T14:08:27.732-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:27.732-0400-559eb89bca4787b9985d1d21", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465307732), what: "dropCollection.start", ns: "db40.coll40", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.791-0400 m30999| 2015-07-09T14:08:27.791-0400 I SHARDING [conn1] distributed lock 'db40.coll40/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb89bca4787b9985d1d22 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.792-0400 m31100| 2015-07-09T14:08:27.792-0400 I COMMAND [conn37] CMD: drop db40.coll40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.796-0400 m31200| 2015-07-09T14:08:27.795-0400 I COMMAND [conn84] CMD: drop db40.coll40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.797-0400 m31102| 2015-07-09T14:08:27.797-0400 I COMMAND [repl writer worker 15] CMD: drop db40.coll40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.797-0400 m31101| 2015-07-09T14:08:27.797-0400 I COMMAND [repl writer worker 6] CMD: drop db40.coll40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.799-0400 m31202| 2015-07-09T14:08:27.799-0400 I COMMAND [repl writer worker 0] CMD: drop db40.coll40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.800-0400 m31201| 2015-07-09T14:08:27.799-0400 I COMMAND [repl writer worker 15] CMD: drop db40.coll40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.853-0400 m31100| 2015-07-09T14:08:27.852-0400 I SHARDING [conn37] remotely refreshing metadata for db40.coll40 with requested shard version 0|0||000000000000000000000000, current shard version is 1|10||559eb776ca4787b9985d1d1f, current metadata version is 1|10||559eb776ca4787b9985d1d1f [js_test:fsm_all_sharded_replication] 
2015-07-09T14:08:27.855-0400 m31100| 2015-07-09T14:08:27.855-0400 W SHARDING [conn37] no chunks found when reloading db40.coll40, previous version was 0|0||559eb776ca4787b9985d1d1f, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.855-0400 m31100| 2015-07-09T14:08:27.855-0400 I SHARDING [conn37] dropping metadata for db40.coll40 at shard version 1|10||559eb776ca4787b9985d1d1f, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.858-0400 m30999| 2015-07-09T14:08:27.858-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:27.858-0400-559eb89bca4787b9985d1d23", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465307858), what: "dropCollection", ns: "db40.coll40", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.913-0400 m30999| 2015-07-09T14:08:27.912-0400 I SHARDING [conn1] distributed lock 'db40.coll40/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.969-0400 m30999| 2015-07-09T14:08:27.969-0400 I COMMAND [conn1] DROP DATABASE: db40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.970-0400 m30999| 2015-07-09T14:08:27.969-0400 I SHARDING [conn1] DBConfig::dropDatabase: db40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:27.970-0400 m30999| 2015-07-09T14:08:27.969-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:27.969-0400-559eb89bca4787b9985d1d24", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465307969), what: "dropDatabase.start", ns: "db40", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.076-0400 m30999| 2015-07-09T14:08:28.076-0400 I SHARDING [conn1] DBConfig::dropDatabase: db40 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.077-0400 m31100| 2015-07-09T14:08:28.077-0400 I COMMAND [conn28] dropDatabase db40 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.077-0400 m31100| 2015-07-09T14:08:28.077-0400 I COMMAND [conn28] dropDatabase db40 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.078-0400 m30999| 2015-07-09T14:08:28.077-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.077-0400-559eb89cca4787b9985d1d25", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465308077), what: "dropDatabase", ns: "db40", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.078-0400 m31101| 2015-07-09T14:08:28.078-0400 I COMMAND [repl writer worker 10] dropDatabase db40 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.078-0400 m31102| 2015-07-09T14:08:28.078-0400 I COMMAND [repl writer worker 5] dropDatabase db40 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.078-0400 m31101| 2015-07-09T14:08:28.078-0400 I COMMAND [repl writer worker 10] dropDatabase db40 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.078-0400 m31102| 2015-07-09T14:08:28.078-0400 I COMMAND [repl writer worker 5] dropDatabase db40 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.176-0400 m31100| 2015-07-09T14:08:28.176-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.180-0400 m31101| 2015-07-09T14:08:28.180-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.180-0400 m31102| 
2015-07-09T14:08:28.180-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.219-0400 m31200| 2015-07-09T14:08:28.218-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.221-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.221-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.221-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.221-0400 jstests/concurrency/fsm_workloads/update_check_index.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.221-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.221-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.221-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.222-0400 m31202| 2015-07-09T14:08:28.222-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.223-0400 m31201| 2015-07-09T14:08:28.222-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.230-0400 m30999| 2015-07-09T14:08:28.229-0400 I SHARDING [conn1] distributed lock 'db41/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb89cca4787b9985d1d26 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.234-0400 m30999| 2015-07-09T14:08:28.233-0400 I SHARDING [conn1] Placing [db41] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.234-0400 m30999| 2015-07-09T14:08:28.233-0400 I SHARDING [conn1] Enabling sharding for database [db41] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.290-0400 m30999| 2015-07-09T14:08:28.289-0400 I SHARDING [conn1] distributed lock 'db41/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.316-0400 m31100| 2015-07-09T14:08:28.315-0400 I INDEX [conn29] build index on: db41.coll41 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.316-0400 m31100| 2015-07-09T14:08:28.316-0400 I INDEX [conn29] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.323-0400 m31100| 2015-07-09T14:08:28.322-0400 I INDEX [conn29] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.325-0400 m30999| 2015-07-09T14:08:28.324-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db41.coll41", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.326-0400 m30999| 2015-07-09T14:08:28.326-0400 I SHARDING [conn1] distributed lock 'db41.coll41/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb89cca4787b9985d1d27 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.328-0400 m30999| 2015-07-09T14:08:28.327-0400 I SHARDING [conn1] enable sharding on: db41.coll41 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.329-0400 m30999| 2015-07-09T14:08:28.328-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.327-0400-559eb89cca4787b9985d1d28", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465308327), what: "shardCollection.start", ns: "db41.coll41", details: { shardKey: { _id: "hashed" }, collection: "db41.coll41", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.338-0400 m31102| 2015-07-09T14:08:28.338-0400 I INDEX [repl writer worker 14] build index on: db41.coll41 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.339-0400 m31102| 2015-07-09T14:08:28.338-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.339-0400 m31101| 2015-07-09T14:08:28.338-0400 I INDEX [repl writer worker 4] build index on: db41.coll41 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.340-0400 m31101| 2015-07-09T14:08:28.338-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.345-0400 m31102| 2015-07-09T14:08:28.344-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.348-0400 m31101| 2015-07-09T14:08:28.348-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.381-0400 m30999| 2015-07-09T14:08:28.380-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db41.coll41 using new epoch 559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.489-0400 m30999| 2015-07-09T14:08:28.489-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db41.coll41: 1ms sequenceNumber: 179 version: 1|1||559eb89cca4787b9985d1d29 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.545-0400 m30999| 2015-07-09T14:08:28.544-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db41.coll41: 0ms sequenceNumber: 180 version: 1|1||559eb89cca4787b9985d1d29 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.547-0400 m31100| 2015-07-09T14:08:28.546-0400 I SHARDING [conn51] remotely refreshing metadata for db41.coll41 with requested shard version 1|1||559eb89cca4787b9985d1d29, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.549-0400 m31100| 2015-07-09T14:08:28.548-0400 I SHARDING [conn51] collection db41.coll41 was previously unsharded, new metadata loaded with shard version 1|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.549-0400 m31100| 2015-07-09T14:08:28.549-0400 I SHARDING [conn51] collection version was loaded at version 1|1||559eb89cca4787b9985d1d29, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.549-0400 m30999| 2015-07-09T14:08:28.549-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.549-0400-559eb89cca4787b9985d1d2a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465308549), what: "shardCollection", ns: "db41.coll41", details: { version: "1|1||559eb89cca4787b9985d1d29" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.606-0400 m30999| 2015-07-09T14:08:28.605-0400 I SHARDING [conn1] distributed lock 'db41.coll41/bs-osx108-8:30999:1436464534:16807' unlocked. 
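The records above show mongos placing db41 on test-rs0, enabling sharding for it, and sharding db41.coll41 on a hashed _id key with two initial chunks (numChunks: 2 in the shardCollection.start event). A minimal mongo shell sketch of an equivalent setup, with the namespace and key copied from the log; the test drives this through its own helpers, and sh.enableSharding/sh.shardCollection are the interactive equivalents of the logged commands:

// Shard db41.coll41 on a hashed _id key; with a hashed key mongos
// pre-creates one chunk per shard, matching the "create 2 chunk(s)" event.
sh.enableSharding("db41");
sh.shardCollection("db41.coll41", { _id: "hashed" });

Hashed sharding spreads inserts evenly across shards, at the cost of turning _id range queries into scatter-gather.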
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.607-0400 m30999| 2015-07-09T14:08:28.606-0400 I SHARDING [conn1] moving chunk ns: db41.coll41 moving ( ns: db41.coll41, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.608-0400 m31100| 2015-07-09T14:08:28.607-0400 I SHARDING [conn37] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.609-0400 m31100| 2015-07-09T14:08:28.608-0400 I SHARDING [conn37] received moveChunk request: { moveChunk: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb89cca4787b9985d1d29') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.613-0400 m31100| 2015-07-09T14:08:28.612-0400 I SHARDING [conn37] distributed lock 'db41.coll41/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb89c792e00bb672749c7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.613-0400 m31100| 2015-07-09T14:08:28.613-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.613-0400-559eb89c792e00bb672749c8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436465308613), what: "moveChunk.start", ns: "db41.coll41", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.667-0400 m31100| 2015-07-09T14:08:28.666-0400 I SHARDING [conn37] remotely refreshing metadata for db41.coll41 based on current shard version 1|1||559eb89cca4787b9985d1d29, current metadata version is 1|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.669-0400 m31100| 2015-07-09T14:08:28.669-0400 I SHARDING [conn37] metadata of collection db41.coll41 already up to date (shard version : 1|1||559eb89cca4787b9985d1d29, took 2ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.669-0400 m31100| 2015-07-09T14:08:28.669-0400 I SHARDING [conn37] moveChunk request accepted at version 1|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.670-0400 m31100| 2015-07-09T14:08:28.669-0400 I SHARDING [conn37] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.670-0400 m31200| 2015-07-09T14:08:28.670-0400 I SHARDING [conn16] remotely refreshing metadata for db41.coll41, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.672-0400 m31200| 2015-07-09T14:08:28.672-0400 I SHARDING [conn16] collection db41.coll41 was previously unsharded, new metadata loaded with shard version 0|0||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.672-0400 m31200| 2015-07-09T14:08:28.672-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb89cca4787b9985d1d29, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.672-0400 m31200| 2015-07-09T14:08:28.672-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db41.coll41 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.675-0400 m31100| 2015-07-09T14:08:28.674-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.679-0400 m31100| 2015-07-09T14:08:28.678-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.684-0400 m31100| 2015-07-09T14:08:28.683-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.693-0400 m31100| 2015-07-09T14:08:28.692-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.695-0400 m31200| 2015-07-09T14:08:28.695-0400 I INDEX [migrateThread] build index on: db41.coll41 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.695-0400 m31200| 2015-07-09T14:08:28.695-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.710-0400 m31200| 2015-07-09T14:08:28.709-0400 I INDEX [migrateThread] build index on: db41.coll41 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.710-0400 m31200| 2015-07-09T14:08:28.709-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.711-0400 m31100| 2015-07-09T14:08:28.710-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.719-0400 m31200| 2015-07-09T14:08:28.719-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.720-0400 m31200| 2015-07-09T14:08:28.719-0400 I SHARDING [migrateThread] Deleter starting delete for: db41.coll41 from { _id: 0 } -> { _id: MaxKey }, with opId: 69343 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.720-0400 m31200| 2015-07-09T14:08:28.720-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db41.coll41 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.729-0400 m31202| 2015-07-09T14:08:28.729-0400 I INDEX [repl writer worker 5] build index on: db41.coll41 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.730-0400 m31202| 2015-07-09T14:08:28.729-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.732-0400 m31201| 2015-07-09T14:08:28.732-0400 I INDEX [repl writer worker 3] build index on: db41.coll41 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.732-0400 m31201| 2015-07-09T14:08:28.732-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.737-0400 m31202| 2015-07-09T14:08:28.737-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.741-0400 m31201| 2015-07-09T14:08:28.740-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.741-0400 m31200| 2015-07-09T14:08:28.740-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.741-0400 m31200| 2015-07-09T14:08:28.740-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db41.coll41' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.744-0400 m31100| 2015-07-09T14:08:28.744-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.745-0400 m31100| 2015-07-09T14:08:28.744-0400 I SHARDING [conn37] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.745-0400 m31100| 2015-07-09T14:08:28.745-0400 I SHARDING [conn37] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.745-0400 m31100| 2015-07-09T14:08:28.745-0400 I SHARDING [conn37] moveChunk setting version to: 2|0||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.752-0400 m31200| 2015-07-09T14:08:28.752-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db41.coll41' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.753-0400 m31200| 2015-07-09T14:08:28.752-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.752-0400-559eb89cd5a107a5b9c0db37", server: "bs-osx108-8", clientAddr: "", time: 
new Date(1436465308752), what: "moveChunk.to", ns: "db41.coll41", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 47, step 2 of 5: 19, step 3 of 5: 0, step 4 of 5: 1, step 5 of 5: 11, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.806-0400 m31100| 2015-07-09T14:08:28.805-0400 I SHARDING [conn37] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.806-0400 m31100| 2015-07-09T14:08:28.805-0400 I SHARDING [conn37] moveChunk updating self version to: 2|1||559eb89cca4787b9985d1d29 through { _id: MinKey } -> { _id: 0 } for collection 'db41.coll41' [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.807-0400 m31100| 2015-07-09T14:08:28.807-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.807-0400-559eb89c792e00bb672749c9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436465308807), what: "moveChunk.commit", ns: "db41.coll41", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.861-0400 m31100| 2015-07-09T14:08:28.860-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.861-0400 m31100| 2015-07-09T14:08:28.861-0400 I SHARDING [conn37] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.861-0400 m31100| 2015-07-09T14:08:28.861-0400 I SHARDING [conn37] Deleter starting delete for: db41.coll41 from { _id: 0 } -> { _id: MaxKey }, with opId: 72782 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.861-0400 m31100| 2015-07-09T14:08:28.861-0400 I SHARDING [conn37] rangeDeleter deleted 0 documents for db41.coll41 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.862-0400 m31100| 2015-07-09T14:08:28.861-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.864-0400 m31100| 2015-07-09T14:08:28.864-0400 I SHARDING [conn37] distributed lock 'db41.coll41/bs-osx108-8:31100:1436464536:197041335' unlocked. 
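At this point the chunk { _id: 0 } -> { _id: MaxKey } has been committed to test-rs1 and the donor has finished its inline range deletion (waitForDelete: true in the logged request). A hedged sketch of issuing the same migration by hand from a mongos shell; the namespace, target shard, and delete option are copied from the logged moveChunk request, with _waitForDelete being the admin-command spelling:

// Move the chunk containing { _id: 0 } to test-rs1 and wait for the
// donor to delete the migrated documents before returning.
db.adminCommand({
    moveChunk: "db41.coll41",
    find: { _id: 0 },
    to: "test-rs1",
    _waitForDelete: true
});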
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.865-0400 m31100| 2015-07-09T14:08:28.864-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.864-0400-559eb89c792e00bb672749ca", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436465308864), what: "moveChunk.from", ns: "db41.coll41", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 60, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 116, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.917-0400 m31100| 2015-07-09T14:08:28.916-0400 I COMMAND [conn37] command db41.coll41 command: moveChunk { moveChunk: "db41.coll41", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb89cca4787b9985d1d29') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 309ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.919-0400 m30999| 2015-07-09T14:08:28.919-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db41.coll41: 1ms sequenceNumber: 181 version: 2|1||559eb89cca4787b9985d1d29 based on: 1|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.921-0400 m31100| 2015-07-09T14:08:28.920-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db41.coll41", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb89cca4787b9985d1d29') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.925-0400 m31100| 2015-07-09T14:08:28.924-0400 I SHARDING [conn37] distributed lock 'db41.coll41/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb89c792e00bb672749cb [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.925-0400 m31100| 2015-07-09T14:08:28.924-0400 I SHARDING [conn37] remotely refreshing metadata for db41.coll41 based on current shard version 2|0||559eb89cca4787b9985d1d29, current metadata version is 2|0||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.927-0400 m31100| 2015-07-09T14:08:28.926-0400 I SHARDING [conn37] updating metadata for db41.coll41 from shard version 2|0||559eb89cca4787b9985d1d29 to shard version 2|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.927-0400 m31100| 2015-07-09T14:08:28.926-0400 I SHARDING [conn37] collection version was loaded at version 2|1||559eb89cca4787b9985d1d29, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.927-0400 m31100| 2015-07-09T14:08:28.926-0400 I SHARDING [conn37] splitChunk accepted at version 2|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.929-0400 m31100| 2015-07-09T14:08:28.928-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.928-0400-559eb89c792e00bb672749cc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new 
Date(1436465308928), what: "split", ns: "db41.coll41", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb89cca4787b9985d1d29') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb89cca4787b9985d1d29') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.986-0400 m31100| 2015-07-09T14:08:28.985-0400 I SHARDING [conn37] distributed lock 'db41.coll41/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.988-0400 m30999| 2015-07-09T14:08:28.988-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db41.coll41: 0ms sequenceNumber: 182 version: 2|3||559eb89cca4787b9985d1d29 based on: 2|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.989-0400 m31200| 2015-07-09T14:08:28.988-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db41.coll41", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb89cca4787b9985d1d29') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.993-0400 m31200| 2015-07-09T14:08:28.992-0400 I SHARDING [conn84] distributed lock 'db41.coll41/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb89cd5a107a5b9c0db38 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.993-0400 m31200| 2015-07-09T14:08:28.993-0400 I SHARDING [conn84] remotely refreshing metadata for db41.coll41 based on current shard version 0|0||559eb89cca4787b9985d1d29, current metadata version is 1|1||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.995-0400 m31200| 2015-07-09T14:08:28.995-0400 I SHARDING [conn84] updating metadata for db41.coll41 from shard version 0|0||559eb89cca4787b9985d1d29 to shard version 2|0||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.996-0400 m31200| 2015-07-09T14:08:28.995-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb89cca4787b9985d1d29, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.996-0400 m31200| 2015-07-09T14:08:28.995-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:28.997-0400 m31200| 2015-07-09T14:08:28.996-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:28.996-0400-559eb89cd5a107a5b9c0db39", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436465308996), what: "split", ns: "db41.coll41", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eb89cca4787b9985d1d29') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb89cca4787b9985d1d29') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.052-0400 m31200| 2015-07-09T14:08:29.051-0400 I SHARDING [conn84] distributed lock 'db41.coll41/bs-osx108-8:31200:1436464537:809424560' unlocked. 
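The records above show a manual moveChunk of the upper half of db41.coll41's hashed _id space from test-rs0 to test-rs1, followed by each shard splitting its half at the hashed midpoint. A rough shell equivalent, assuming a mongos at bs-osx108-8:30999 as in this run (through a mongos one issues split with a middle key; the splitChunk/splitKeys form in the log is the internal request the mongos sends to the shard):

    // move the { _id: 0 } -->> MaxKey chunk to the second shard,
    // waiting for the donor's range deleter as the test does
    var admin = new Mongo("bs-osx108-8:30999").getDB("admin");
    assert.commandWorked(admin.runCommand({
        moveChunk: "db41.coll41",
        find: {_id: 0},
        to: "test-rs1",
        _waitForDelete: true
    }));
    // then split each half at its hashed midpoint, matching the log
    assert.commandWorked(admin.runCommand(
        {split: "db41.coll41", middle: {_id: NumberLong("-4611686018427387902")}}));
    assert.commandWorked(admin.runCommand(
        {split: "db41.coll41", middle: {_id: NumberLong("4611686018427387902")}}));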
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.054-0400 m30999| 2015-07-09T14:08:29.054-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db41.coll41: 0ms sequenceNumber: 183 version: 2|5||559eb89cca4787b9985d1d29 based on: 2|3||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.065-0400 m31200| 2015-07-09T14:08:29.064-0400 I INDEX [conn83] build index on: db41.coll41 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.065-0400 m31200| 2015-07-09T14:08:29.065-0400 I INDEX [conn83] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.066-0400 m31100| 2015-07-09T14:08:29.064-0400 I INDEX [conn51] build index on: db41.coll41 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.066-0400 m31100| 2015-07-09T14:08:29.065-0400 I INDEX [conn51] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.073-0400 m31100| 2015-07-09T14:08:29.073-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.074-0400 m31200| 2015-07-09T14:08:29.073-0400 I INDEX [conn83] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.089-0400 m31101| 2015-07-09T14:08:29.087-0400 I INDEX [repl writer worker 7] build index on: db41.coll41 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.089-0400 m31101| 2015-07-09T14:08:29.088-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.093-0400 m31100| 2015-07-09T14:08:29.092-0400 I INDEX [conn51] build index on: db41.coll41 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.093-0400 m31100| 2015-07-09T14:08:29.093-0400 I INDEX [conn51] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.099-0400 m31200| 2015-07-09T14:08:29.098-0400 I INDEX [conn83] build index on: db41.coll41 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.099-0400 m31200| 2015-07-09T14:08:29.098-0400 I INDEX [conn83] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.099-0400 m31102| 2015-07-09T14:08:29.098-0400 I INDEX [repl writer worker 9] build index on: db41.coll41 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.100-0400 m31102| 2015-07-09T14:08:29.098-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.103-0400 m31201| 2015-07-09T14:08:29.102-0400 I INDEX [repl writer worker 5] build index on: db41.coll41 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.103-0400 m31201| 2015-07-09T14:08:29.102-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.103-0400 m31202| 2015-07-09T14:08:29.102-0400 I INDEX [repl writer worker 13] build index on: db41.coll41 properties: { v: 1, key: { a: 1.0 }, name: "a_1", ns: "db41.coll41" } 
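The interleaved INDEX records that follow are one logical operation fanning out: a createIndexes sent through a mongos is forwarded to each shard primary (m31100, m31200), and each primary's build is then replayed by its secondaries (m31101/m31102, m31201/m31202) via the oplog, which is why the same a_1/b_1/c_1 builds appear six times. A minimal sketch of the client side, host/port illustrative:

    // one call per index; each shard primary builds it, secondaries replicate
    var coll = new Mongo("bs-osx108-8:30999").getDB("db41").coll41;
    assert.commandWorked(coll.ensureIndex({a: 1}));
    assert.commandWorked(coll.ensureIndex({b: 1}));
    assert.commandWorked(coll.ensureIndex({c: 1}));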
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.103-0400 m31202| 2015-07-09T14:08:29.103-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.106-0400 m31101| 2015-07-09T14:08:29.106-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.112-0400 m31200| 2015-07-09T14:08:29.110-0400 I INDEX [conn83] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.113-0400 m31102| 2015-07-09T14:08:29.110-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.113-0400 m31100| 2015-07-09T14:08:29.110-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.114-0400 m31202| 2015-07-09T14:08:29.112-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.119-0400 m31201| 2015-07-09T14:08:29.119-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.126-0400 m31200| 2015-07-09T14:08:29.125-0400 I INDEX [conn83] build index on: db41.coll41 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.126-0400 m31200| 2015-07-09T14:08:29.126-0400 I INDEX [conn83] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.134-0400 m31202| 2015-07-09T14:08:29.134-0400 I INDEX [repl writer worker 12] build index on: db41.coll41 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.135-0400 m31202| 2015-07-09T14:08:29.134-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.135-0400 m31102| 2015-07-09T14:08:29.134-0400 I INDEX [repl writer worker 2] build index on: db41.coll41 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.135-0400 m31102| 2015-07-09T14:08:29.134-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.135-0400 m31101| 2015-07-09T14:08:29.134-0400 I INDEX [repl writer worker 14] build index on: db41.coll41 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.135-0400 m31101| 2015-07-09T14:08:29.134-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.135-0400 m31100| 2015-07-09T14:08:29.134-0400 I INDEX [conn51] build index on: db41.coll41 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.136-0400 m31100| 2015-07-09T14:08:29.134-0400 I INDEX [conn51] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.145-0400 m31100| 2015-07-09T14:08:29.144-0400 I INDEX [conn51] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.145-0400 m31201| 2015-07-09T14:08:29.144-0400 I INDEX [repl writer worker 13] build index on: db41.coll41 properties: { v: 1, key: { b: 1.0 }, name: "b_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.145-0400 m31201| 2015-07-09T14:08:29.145-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.146-0400 m31102| 2015-07-09T14:08:29.144-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.151-0400 m31200| 2015-07-09T14:08:29.150-0400 I INDEX [conn83] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.153-0400 m31202| 2015-07-09T14:08:29.153-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.156-0400 m31101| 2015-07-09T14:08:29.155-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.161-0400 m31201| 2015-07-09T14:08:29.160-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.161-0400 m31102| 2015-07-09T14:08:29.160-0400 I INDEX [repl writer worker 6] build index on: db41.coll41 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.161-0400 m31102| 2015-07-09T14:08:29.160-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.165-0400 m31202| 2015-07-09T14:08:29.165-0400 I INDEX [repl writer worker 8] build index on: db41.coll41 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.166-0400 m31202| 2015-07-09T14:08:29.165-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.170-0400 m31101| 2015-07-09T14:08:29.170-0400 I INDEX [repl writer worker 0] build index on: db41.coll41 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.170-0400 m31101| 2015-07-09T14:08:29.170-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.172-0400 m31102| 2015-07-09T14:08:29.171-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.175-0400 m31201| 2015-07-09T14:08:29.173-0400 I INDEX [repl writer worker 11] build index on: db41.coll41 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db41.coll41" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.175-0400 m31201| 2015-07-09T14:08:29.173-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.177-0400 m31202| 2015-07-09T14:08:29.177-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.181-0400 m31101| 2015-07-09T14:08:29.181-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.184-0400 m31201| 2015-07-09T14:08:29.184-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.185-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.267-0400 m30998| 2015-07-09T14:08:29.267-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63503 #257 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.282-0400 m30998| 2015-07-09T14:08:29.277-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63504 #258 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.303-0400 m30999| 2015-07-09T14:08:29.300-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63505 #257 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.303-0400 m30999| 2015-07-09T14:08:29.303-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63506 #258 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.303-0400 m30999| 2015-07-09T14:08:29.303-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63507 #259 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.307-0400 m30998| 2015-07-09T14:08:29.304-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63509 #259 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.310-0400 m30998| 2015-07-09T14:08:29.309-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63510 #260 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.310-0400 m30998| 2015-07-09T14:08:29.309-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63511 #261 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.311-0400 m30999| 2015-07-09T14:08:29.310-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63508 #260 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.320-0400 m30999| 2015-07-09T14:08:29.320-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63512 #261 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.325-0400 setting random seed: 705102486535 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.325-0400 setting random seed: 9283012095838 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.325-0400 setting random seed: 1986981099471 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.326-0400 setting random seed: 1092854663729 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.327-0400 setting random seed: 5191354365088 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.329-0400 setting random seed: 9584367410279 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.330-0400 setting random seed: 3401600825600 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.331-0400 setting random seed: 4119074372574 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.333-0400 m30998| 2015-07-09T14:08:29.333-0400 I SHARDING [conn258] ChunkManager: time to load chunks for db41.coll41: 1ms sequenceNumber: 51 version: 2|5||559eb89cca4787b9985d1d29 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.336-0400 setting random seed: 7608341854065 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.336-0400 setting random seed: 4463139581494 
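"Using 10 threads (requested 10)" and the "setting random seed" lines come from the FSM framework: each worker thread opens its own mongos connection (hence the burst of accepted connections on m30998/m30999) and seeds the shell's PRNG so a failing run can be replayed. A sketch of that seeding pattern, assuming the shell's built-in Random helper:

    // each FSM worker seeds its PRNG and logs the seed for reproducibility
    var seed = 705102486535;            // first seed printed above
    Random.srand(seed);
    print("setting random seed: " + seed);
    var draw = Random.randInt(1000);    // workload states draw values like this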
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:29.450-0400 m31100| 2015-07-09T14:08:29.449-0400 I COMMAND [conn24] command db41.$cmd command: update { update: "coll41", updates: [ { q: { a: 1.0, b: 1.0 }, u: { $set: { c: 774.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('00000000ffffffffffffffff') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 40, w: 40 } }, Database: { acquireCount: { w: 40 } }, Collection: { acquireCount: { w: 35 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.075-0400 m31100| 2015-07-09T14:08:31.075-0400 I WRITE [conn26] update db41.coll41 query: { a: 1.0, b: 1.0 } update: { $set: { c: 993.0 } } nscanned:5 nscannedObjects:5 nMatched:5 nModified:5 keyUpdates:1 writeConflicts:33 numYields:33 locks:{ Global: { acquireCount: { r: 39, w: 39 } }, Database: { acquireCount: { w: 39 } }, Collection: { acquireCount: { w: 34 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.076-0400 m31100| 2015-07-09T14:08:31.075-0400 I COMMAND [conn26] command db41.$cmd command: update { update: "coll41", updates: [ { q: { a: 1.0, b: 1.0 }, u: { $set: { c: 993.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('00000000ffffffffffffffff') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 39, w: 39 } }, Database: { acquireCount: { w: 39 } }, Collection: { acquireCount: { w: 34 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.275-0400 m31200| 2015-07-09T14:08:31.275-0400 I WRITE [conn49] update db41.coll41 query: { a: 1.0, b: 1.0 } update: { $set: { c: 671.0 } } nscanned:5 nscannedObjects:5 nMatched:5 nModified:5 keyUpdates:1 writeConflicts:48 numYields:48 locks:{ Global: { acquireCount: { r: 54, w: 54 } }, Database: { acquireCount: { w: 54 } }, Collection: { acquireCount: { w: 49 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.276-0400 m31200| 2015-07-09T14:08:31.275-0400 I COMMAND [conn49] command db41.$cmd command: update { update: "coll41", updates: [ { q: { a: 1.0, b: 1.0 }, u: { $set: { c: 671.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 0|0, ObjectId('00000000ffffffffffffffff') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 54, w: 54 } }, Database: { acquireCount: { w: 54 } }, Collection: { acquireCount: { w: 49 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } protocol:op_command 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.417-0400 m30998| 2015-07-09T14:08:31.417-0400 I NETWORK [conn258] end connection 127.0.0.1:63504 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.434-0400 m31200| 2015-07-09T14:08:31.434-0400 I WRITE [conn23] update db41.coll41 query: { a: 1.0, b: 1.0 } update: { $set: { c: 919.0 } } nscanned:5 
nscannedObjects:5 nMatched:5 nModified:5 keyUpdates:1 writeConflicts:31 numYields:31 locks:{ Global: { acquireCount: { r: 37, w: 37 } }, Database: { acquireCount: { w: 37 } }, Collection: { acquireCount: { w: 32 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.434-0400 m31200| 2015-07-09T14:08:31.434-0400 I COMMAND [conn23] command db41.$cmd command: update { update: "coll41", updates: [ { q: { a: 1.0, b: 1.0 }, u: { $set: { c: 919.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 0|0, ObjectId('00000000ffffffffffffffff') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 37, w: 37 } }, Database: { acquireCount: { w: 37 } }, Collection: { acquireCount: { w: 32 } }, Metadata: { acquireCount: { w: 5 } }, oplog: { acquireCount: { w: 5 } } } protocol:op_command 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.442-0400 m30999| 2015-07-09T14:08:31.442-0400 I NETWORK [conn260] end connection 127.0.0.1:63508 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.472-0400 m30998| 2015-07-09T14:08:31.472-0400 I NETWORK [conn257] end connection 127.0.0.1:63503 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.521-0400 m30999| 2015-07-09T14:08:31.521-0400 I NETWORK [conn259] end connection 127.0.0.1:63507 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.522-0400 m30999| 2015-07-09T14:08:31.522-0400 I NETWORK [conn257] end connection 127.0.0.1:63505 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.565-0400 m30999| 2015-07-09T14:08:31.565-0400 I NETWORK [conn261] end connection 127.0.0.1:63512 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.591-0400 m30999| 2015-07-09T14:08:31.591-0400 I NETWORK [conn258] end connection 127.0.0.1:63506 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.595-0400 m30998| 2015-07-09T14:08:31.594-0400 I NETWORK [conn261] end connection 127.0.0.1:63511 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.606-0400 m30998| 2015-07-09T14:08:31.606-0400 I NETWORK [conn260] end connection 127.0.0.1:63510 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.634-0400 m30998| 2015-07-09T14:08:31.633-0400 I NETWORK [conn259] end connection 127.0.0.1:63509 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.666-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.666-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.666-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.666-0400 jstests/concurrency/fsm_workloads/update_check_index.js: Workload completed in 2469 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.666-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.666-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.666-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.667-0400 m30999| 2015-07-09T14:08:31.666-0400 I COMMAND [conn1] DROP: db41.coll41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.667-0400 m30999| 2015-07-09T14:08:31.666-0400 I SHARDING [conn1] about to log metadata event: { _id: 
"bs-osx108-8-2015-07-09T14:08:31.666-0400-559eb89fca4787b9985d1d2b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465311666), what: "dropCollection.start", ns: "db41.coll41", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.723-0400 m30999| 2015-07-09T14:08:31.723-0400 I SHARDING [conn1] distributed lock 'db41.coll41/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb89fca4787b9985d1d2c [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.724-0400 m31100| 2015-07-09T14:08:31.724-0400 I COMMAND [conn37] CMD: drop db41.coll41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.727-0400 m31200| 2015-07-09T14:08:31.727-0400 I COMMAND [conn84] CMD: drop db41.coll41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.728-0400 m31101| 2015-07-09T14:08:31.728-0400 I COMMAND [repl writer worker 8] CMD: drop db41.coll41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.729-0400 m31102| 2015-07-09T14:08:31.728-0400 I COMMAND [repl writer worker 5] CMD: drop db41.coll41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.734-0400 m31202| 2015-07-09T14:08:31.734-0400 I COMMAND [repl writer worker 10] CMD: drop db41.coll41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.734-0400 m31201| 2015-07-09T14:08:31.734-0400 I COMMAND [repl writer worker 13] CMD: drop db41.coll41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.785-0400 m31100| 2015-07-09T14:08:31.785-0400 I SHARDING [conn37] remotely refreshing metadata for db41.coll41 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb89cca4787b9985d1d29, current metadata version is 2|3||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.787-0400 m31100| 2015-07-09T14:08:31.787-0400 W SHARDING [conn37] no chunks found when reloading db41.coll41, previous version was 0|0||559eb89cca4787b9985d1d29, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.788-0400 m31100| 2015-07-09T14:08:31.787-0400 I SHARDING [conn37] dropping metadata for db41.coll41 at shard version 2|3||559eb89cca4787b9985d1d29, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.789-0400 m31200| 2015-07-09T14:08:31.788-0400 I SHARDING [conn84] remotely refreshing metadata for db41.coll41 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb89cca4787b9985d1d29, current metadata version is 2|5||559eb89cca4787b9985d1d29 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.790-0400 m31200| 2015-07-09T14:08:31.790-0400 W SHARDING [conn84] no chunks found when reloading db41.coll41, previous version was 0|0||559eb89cca4787b9985d1d29, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.791-0400 m31200| 2015-07-09T14:08:31.790-0400 I SHARDING [conn84] dropping metadata for db41.coll41 at shard version 2|5||559eb89cca4787b9985d1d29, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.792-0400 m30999| 2015-07-09T14:08:31.792-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:31.792-0400-559eb89fca4787b9985d1d2d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465311792), what: "dropCollection", ns: "db41.coll41", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.846-0400 m30999| 2015-07-09T14:08:31.846-0400 I SHARDING [conn1] distributed lock 'db41.coll41/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.904-0400 m30999| 2015-07-09T14:08:31.903-0400 I COMMAND [conn1] DROP DATABASE: db41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.904-0400 m30999| 2015-07-09T14:08:31.903-0400 I SHARDING [conn1] DBConfig::dropDatabase: db41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:31.904-0400 m30999| 2015-07-09T14:08:31.903-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:31.903-0400-559eb89fca4787b9985d1d2e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465311903), what: "dropDatabase.start", ns: "db41", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.011-0400 m30999| 2015-07-09T14:08:32.011-0400 I SHARDING [conn1] DBConfig::dropDatabase: db41 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.012-0400 m31100| 2015-07-09T14:08:32.012-0400 I COMMAND [conn28] dropDatabase db41 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.013-0400 m31100| 2015-07-09T14:08:32.012-0400 I COMMAND [conn28] dropDatabase db41 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.013-0400 m30999| 2015-07-09T14:08:32.013-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:32.013-0400-559eb8a0ca4787b9985d1d2f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465312013), what: "dropDatabase", ns: "db41", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.014-0400 m31101| 2015-07-09T14:08:32.013-0400 I COMMAND [repl writer worker 10] dropDatabase db41 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.014-0400 m31101| 2015-07-09T14:08:32.013-0400 I COMMAND [repl writer worker 10] dropDatabase db41 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.014-0400 m31102| 2015-07-09T14:08:32.013-0400 I COMMAND [repl writer worker 15] dropDatabase db41 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.014-0400 m31102| 2015-07-09T14:08:32.013-0400 I COMMAND [repl writer worker 15] dropDatabase db41 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.114-0400 m31100| 2015-07-09T14:08:32.114-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.118-0400 m31102| 2015-07-09T14:08:32.118-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.118-0400 m31101| 2015-07-09T14:08:32.118-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.165-0400 m31200| 2015-07-09T14:08:32.164-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.168-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.168-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.168-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.168-0400 jstests/concurrency/fsm_workloads/explain_remove.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.168-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.168-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.168-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.169-0400 m31202| 2015-07-09T14:08:32.168-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:08:32.169-0400 m31201| 2015-07-09T14:08:32.168-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.178-0400 m30999| 2015-07-09T14:08:32.178-0400 I SHARDING [conn1] distributed lock 'db42/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a0ca4787b9985d1d30 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.183-0400 m30999| 2015-07-09T14:08:32.182-0400 I SHARDING [conn1] Placing [db42] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.183-0400 m30999| 2015-07-09T14:08:32.182-0400 I SHARDING [conn1] Enabling sharding for database [db42] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.237-0400 m30999| 2015-07-09T14:08:32.237-0400 I SHARDING [conn1] distributed lock 'db42/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.263-0400 m31100| 2015-07-09T14:08:32.263-0400 I INDEX [conn144] build index on: db42.coll42 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db42.coll42" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.263-0400 m31100| 2015-07-09T14:08:32.263-0400 I INDEX [conn144] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.273-0400 m31100| 2015-07-09T14:08:32.273-0400 I INDEX [conn144] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.274-0400 m30999| 2015-07-09T14:08:32.274-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db42.coll42", key: { j: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.278-0400 m30999| 2015-07-09T14:08:32.277-0400 I SHARDING [conn1] distributed lock 'db42.coll42/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a0ca4787b9985d1d31 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.280-0400 m30999| 2015-07-09T14:08:32.279-0400 I SHARDING [conn1] enable sharding on: db42.coll42 with shard key: { j: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.281-0400 m30999| 2015-07-09T14:08:32.279-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:32.279-0400-559eb8a0ca4787b9985d1d32", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465312279), what: "shardCollection.start", ns: "db42.coll42", details: { shardKey: { j: 1.0 }, collection: "db42.coll42", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.283-0400 m31101| 2015-07-09T14:08:32.283-0400 I INDEX [repl writer worker 6] build index on: db42.coll42 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db42.coll42" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.284-0400 m31101| 2015-07-09T14:08:32.283-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.288-0400 m31102| 2015-07-09T14:08:32.287-0400 I INDEX [repl writer worker 7] build index on: db42.coll42 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db42.coll42" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.288-0400 m31102| 2015-07-09T14:08:32.287-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.294-0400 m31102| 2015-07-09T14:08:32.293-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.294-0400 m31101| 2015-07-09T14:08:32.294-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.334-0400 m30999| 2015-07-09T14:08:32.333-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db42.coll42 using new epoch 559eb8a0ca4787b9985d1d33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.389-0400 m30999| 2015-07-09T14:08:32.388-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db42.coll42: 0ms sequenceNumber: 184 version: 1|0||559eb8a0ca4787b9985d1d33 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.445-0400 m30999| 2015-07-09T14:08:32.445-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db42.coll42: 0ms sequenceNumber: 185 version: 1|0||559eb8a0ca4787b9985d1d33 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.448-0400 m31100| 2015-07-09T14:08:32.447-0400 I SHARDING [conn51] remotely refreshing metadata for db42.coll42 with requested shard version 1|0||559eb8a0ca4787b9985d1d33, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.450-0400 m31100| 2015-07-09T14:08:32.450-0400 I SHARDING [conn51] collection db42.coll42 was previously unsharded, new metadata loaded with shard version 1|0||559eb8a0ca4787b9985d1d33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.450-0400 m31100| 2015-07-09T14:08:32.450-0400 I SHARDING [conn51] collection version was loaded at version 1|0||559eb8a0ca4787b9985d1d33, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.451-0400 m30999| 2015-07-09T14:08:32.450-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:32.450-0400-559eb8a0ca4787b9985d1d34", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465312450), what: "shardCollection", ns: "db42.coll42", details: { version: "1|0||559eb8a0ca4787b9985d1d33" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.506-0400 m30999| 2015-07-09T14:08:32.505-0400 I SHARDING [conn1] distributed lock 'db42.coll42/bs-osx108-8:30999:1436464534:16807' unlocked. 
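For db42 the setup is the standard range-sharding sequence: enable sharding on the database (placed on test-rs0), build the shard-key index {j: 1} on the primary shard, then shard the collection, which creates the single initial chunk under new epoch 559eb8a0ca4787b9985d1d33. The equivalent shell steps, in the order the log shows:

    var mongos = new Mongo("bs-osx108-8:30999");
    assert.commandWorked(mongos.adminCommand({enableSharding: "db42"}));
    // the shard key needs a supporting index before shardCollection
    assert.commandWorked(mongos.getDB("db42").coll42.ensureIndex({j: 1}));
    assert.commandWorked(mongos.adminCommand(
        {shardCollection: "db42.coll42", key: {j: 1}}));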
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.506-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.605-0400 m30999| 2015-07-09T14:08:32.600-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63513 #262 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.615-0400 m30998| 2015-07-09T14:08:32.614-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63514 #262 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.615-0400 m30998| 2015-07-09T14:08:32.615-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63515 #263 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.616-0400 m30998| 2015-07-09T14:08:32.616-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63516 #264 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.616-0400 m30999| 2015-07-09T14:08:32.616-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63518 #263 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.617-0400 m30999| 2015-07-09T14:08:32.617-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63519 #264 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.622-0400 m30998| 2015-07-09T14:08:32.621-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63517 #265 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.624-0400 m30998| 2015-07-09T14:08:32.624-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63520 #266 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.636-0400 m30999| 2015-07-09T14:08:32.636-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63521 #265 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.637-0400 m30999| 2015-07-09T14:08:32.637-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63522 #266 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.646-0400 setting random seed: 547497039660 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.646-0400 setting random seed: 343591179698 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.646-0400 setting random seed: 657496727071 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.647-0400 setting random seed: 1782749863341 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.647-0400 setting random seed: 1786591229028 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.647-0400 setting random seed: 7238800269551 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.648-0400 setting random seed: 2676967130973 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.648-0400 setting random seed: 9555024090223 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.653-0400 setting random seed: 2516187005676 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.655-0400 m30998| 2015-07-09T14:08:32.655-0400 I SHARDING [conn262] ChunkManager: time to load chunks for db42.coll42: 0ms sequenceNumber: 52 version: 1|0||559eb8a0ca4787b9985d1d33 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.657-0400 setting random seed: 1289959158748 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.705-0400 m31100| 2015-07-09T14:08:32.705-0400 I SHARDING [conn37] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey 
} [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.706-0400 m31100| 2015-07-09T14:08:32.705-0400 I SHARDING [conn34] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.707-0400 m31100| 2015-07-09T14:08:32.706-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.707-0400 m31100| 2015-07-09T14:08:32.706-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.707-0400 m31100| 2015-07-09T14:08:32.707-0400 I SHARDING [conn38] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.710-0400 m31100| 2015-07-09T14:08:32.707-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.711-0400 m31100| 2015-07-09T14:08:32.708-0400 I SHARDING [conn38] could not acquire lock 'db42.coll42/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.711-0400 m31100| 2015-07-09T14:08:32.708-0400 I SHARDING [conn38] distributed lock 'db42.coll42/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.712-0400 m31100| 2015-07-09T14:08:32.708-0400 W SHARDING [conn38] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.712-0400 m31100| 2015-07-09T14:08:32.709-0400 I SHARDING [conn37] could not acquire lock 'db42.coll42/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.713-0400 m31100| 2015-07-09T14:08:32.709-0400 I SHARDING [conn37] distributed lock 'db42.coll42/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.713-0400 m31100| 2015-07-09T14:08:32.709-0400 W SHARDING [conn37] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.713-0400 m30999| 2015-07-09T14:08:32.709-0400 W SHARDING [conn266] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.714-0400 m30999| 2015-07-09T14:08:32.709-0400 W SHARDING [conn262] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.714-0400 m31100| 2015-07-09T14:08:32.709-0400 I SHARDING [conn34] distributed lock 'db42.coll42/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb8a0792e00bb672749ce [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.714-0400 m31100| 2015-07-09T14:08:32.709-0400 I SHARDING [conn34] remotely refreshing metadata for db42.coll42 based on current shard version 1|0||559eb8a0ca4787b9985d1d33, current metadata version is 1|0||559eb8a0ca4787b9985d1d33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.714-0400 m31100| 2015-07-09T14:08:32.710-0400 I SHARDING [conn32] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.714-0400 m31100| 2015-07-09T14:08:32.710-0400 I SHARDING [conn35] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.715-0400 m31100| 2015-07-09T14:08:32.710-0400 I SHARDING [conn34] metadata of collection db42.coll42 already up to date (shard version : 1|0||559eb8a0ca4787b9985d1d33, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.715-0400 m31100| 2015-07-09T14:08:32.710-0400 I SHARDING [conn34] splitChunk accepted at version 1|0||559eb8a0ca4787b9985d1d33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.716-0400 m31100| 2015-07-09T14:08:32.711-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.716-0400 m31100| 2015-07-09T14:08:32.711-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.716-0400 m31100| 2015-07-09T14:08:32.713-0400 W SHARDING [conn32] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock 
for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.717-0400 m30998| 2015-07-09T14:08:32.713-0400 W SHARDING [conn264] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.717-0400 m31100| 2015-07-09T14:08:32.713-0400 W SHARDING [conn35] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.717-0400 m30998| 2015-07-09T14:08:32.715-0400 W SHARDING [conn262] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.718-0400 m31100| 2015-07-09T14:08:32.715-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:32.715-0400-559eb8a0792e00bb672749d1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465312715), what: "multi-split", ns: "db42.coll42", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 1, of: 3, chunk: { min: { j: MinKey }, max: { j: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb8a0ca4787b9985d1d33') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.732-0400 m31100| 2015-07-09T14:08:32.731-0400 I SHARDING [conn37] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.732-0400 m31100| 2015-07-09T14:08:32.731-0400 I SHARDING [conn35] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.732-0400 m31100| 2015-07-09T14:08:32.731-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.733-0400 m31100| 2015-07-09T14:08:32.731-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.733-0400 m31100| 2015-07-09T14:08:32.733-0400 I SHARDING [conn38] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } 
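Each "request split points lookup" record is the shard answering a splitVector request from one of the two mongoses; both routers see the chunk exceed the size threshold as the workload inserts, compute split keys, and race to issue splitChunk with them. The lookup can be reproduced directly against the shard primary (not the mongos); a sketch with the sizes from this run:

    // ask the shard primary for split points in the MinKey -->> MaxKey chunk
    var shard = new Mongo("bs-osx108-8:31100").getDB("admin");
    var res = shard.runCommand({
        splitVector: "db42.coll42",
        keyPattern: {j: 1},
        min: {j: MinKey},
        max: {j: MaxKey},
        maxChunkSizeBytes: 52428800    // 50 MB, as in the moveChunk above
    });
    printjson(res.splitKeys);          // e.g. [ { j: 0 }, { j: 4 }, ... ]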
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.734-0400 m31100| 2015-07-09T14:08:32.733-0400 W SHARDING [conn37] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.735-0400 m31100| 2015-07-09T14:08:32.733-0400 W SHARDING [conn35] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.735-0400 m30999| 2015-07-09T14:08:32.733-0400 W SHARDING [conn265] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.736-0400 m30998| 2015-07-09T14:08:32.733-0400 W SHARDING [conn264] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.736-0400 m31100| 2015-07-09T14:08:32.734-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.737-0400 m31100| 2015-07-09T14:08:32.735-0400 W SHARDING [conn38] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.737-0400 m30999| 2015-07-09T14:08:32.735-0400 W SHARDING [conn263] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.745-0400 m31100| 2015-07-09T14:08:32.744-0400 I SHARDING [conn38] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.746-0400 m31100| 2015-07-09T14:08:32.745-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.747-0400 m31100| 2015-07-09T14:08:32.745-0400 I SHARDING [conn35] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.747-0400 m31100| 2015-07-09T14:08:32.746-0400 W SHARDING [conn38] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.748-0400 m31100| 2015-07-09T14:08:32.746-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.748-0400 m30999| 2015-07-09T14:08:32.746-0400 W SHARDING [conn265] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.749-0400 m31100| 2015-07-09T14:08:32.747-0400 W SHARDING [conn35] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. 
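Only one of the racing requests (conn34's) holds the collection's distributed lock; every other splitChunk fails with "could not acquire collection lock" (code 125), which the mongoses treat as benign and simply retry on later writes. A workload issuing manual splits can use the same retry-on-contention pattern; a minimal sketch:

    // retry a manual split while another node holds the collection lock
    function splitAtWithRetry(adminDB, ns, middle, attempts) {
        for (var i = 0; i < attempts; i++) {
            var res = adminDB.runCommand({split: ns, middle: middle});
            if (res.ok) {
                return res;
            }
            print("split of " + ns + " failed (" + res.errmsg + "), retrying");
            sleep(100);  // let the winning split commit and release the lock
        }
        throw Error("could not split " + ns + " at " + tojson(middle));
    }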
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.749-0400 m31100| 2015-07-09T14:08:32.747-0400 I SHARDING [conn32] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.749-0400 m30998| 2015-07-09T14:08:32.747-0400 W SHARDING [conn265] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.750-0400 m31100| 2015-07-09T14:08:32.748-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.750-0400 m31100| 2015-07-09T14:08:32.749-0400 W SHARDING [conn32] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.750-0400 m30998| 2015-07-09T14:08:32.749-0400 W SHARDING [conn263] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.763-0400 m30998| 2015-07-09T14:08:32.762-0400 I SHARDING [conn262] ChunkManager: time to load chunks for db42.coll42: 0ms sequenceNumber: 53 version: 1|3||559eb8a0ca4787b9985d1d33 based on: 1|0||559eb8a0ca4787b9985d1d33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.769-0400 m31100| 2015-07-09T14:08:32.768-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:32.768-0400-559eb8a0792e00bb672749d2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465312768), what: "multi-split", ns: "db42.coll42", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 2, of: 3, chunk: { min: { j: 0.0 }, max: { j: 4.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb8a0ca4787b9985d1d33') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.769-0400 m31100| 2015-07-09T14:08:32.768-0400 I SHARDING [conn38] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.770-0400 m31100| 2015-07-09T14:08:32.769-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 14.0 } ], 
configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.771-0400 m31100| 2015-07-09T14:08:32.770-0400 I SHARDING [conn37] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.771-0400 m31100| 2015-07-09T14:08:32.771-0400 W SHARDING [conn38] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.773-0400 m30999| 2015-07-09T14:08:32.771-0400 W SHARDING [conn262] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.773-0400 m31100| 2015-07-09T14:08:32.771-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.773-0400 m31100| 2015-07-09T14:08:32.773-0400 W SHARDING [conn37] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.775-0400 m30999| 2015-07-09T14:08:32.773-0400 W SHARDING [conn266] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.789-0400 m31100| 2015-07-09T14:08:32.787-0400 I SHARDING [conn37] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.790-0400 m31100| 2015-07-09T14:08:32.788-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.792-0400 m31100| 2015-07-09T14:08:32.791-0400 W SHARDING [conn37] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.792-0400 m30999| 2015-07-09T14:08:32.791-0400 W SHARDING [conn262] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.801-0400 m31100| 2015-07-09T14:08:32.800-0400 I SHARDING [conn37] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.801-0400 m31100| 2015-07-09T14:08:32.801-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.802-0400 m31100| 2015-07-09T14:08:32.802-0400 W SHARDING [conn37] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. 
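The storm repeats because the proposed split points keep growing ({ j: 0 } through { j: 28 } and beyond): inserts keep landing on the still-unsplit [{ : MinKey }, { : MaxKey }) chunk, so each mongos recomputes split points and tries again. A hypothetical load generator showing that dual-router pressure, assuming this fixture's mongos ports 30998/30999 on 127.0.0.1 as in the connection lines:

    // Two mongos routing inserts into the same chunk will both trip the
    // autosplit heuristic and race for the collection lock, as above.
    var routerA = new Mongo("127.0.0.1:30999").getDB("db42");
    var routerB = new Mongo("127.0.0.1:30998").getDB("db42");
    for (var i = 0; i < 1000; i++) {
        routerA.coll42.insert({ j: i % 30 });
        routerB.coll42.insert({ j: (i + 1) % 30 });
    }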
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.804-0400 m30999| 2015-07-09T14:08:32.802-0400 W SHARDING [conn262] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.819-0400 m31100| 2015-07-09T14:08:32.819-0400 I SHARDING [conn37] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.819-0400 m31100| 2015-07-09T14:08:32.819-0400 I SHARDING [conn38] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.821-0400 m31100| 2015-07-09T14:08:32.819-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 20.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.822-0400 m31100| 2015-07-09T14:08:32.820-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 20.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.822-0400 m31100| 2015-07-09T14:08:32.820-0400 I SHARDING [conn15] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.822-0400 m31100| 2015-07-09T14:08:32.821-0400 W SHARDING [conn38] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.823-0400 m31100| 2015-07-09T14:08:32.821-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:32.821-0400-559eb8a0792e00bb672749d3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465312821), what: "multi-split", ns: "db42.coll42", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 3, of: 3, chunk: { min: { j: 4.0 }, max: { j: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb8a0ca4787b9985d1d33') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.823-0400 m30999| 2015-07-09T14:08:32.821-0400 W SHARDING [conn262] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 20.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.824-0400 m31100| 2015-07-09T14:08:32.821-0400 W SHARDING [conn37] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.824-0400 m30999| 2015-07-09T14:08:32.821-0400 W SHARDING [conn265] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 18.0 }, { j: 20.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.825-0400 m31100| 2015-07-09T14:08:32.821-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.825-0400 m31100| 2015-07-09T14:08:32.824-0400 W SHARDING [conn15] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. 
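While the losers retry, the winner is committing: the two "multi-split" metadata events (chunk 2 of 3 covering [{ j: 0 }, { j: 4 }), then chunk 3 of 3 covering [{ j: 4 }, MaxKey)) record the original chunk being cut into three. Once the lock is released, the resulting layout can be read back from the config database; a sketch, connected through a mongos, using the standard 3.x config.chunks schema:

    // Read back the chunk layout the multi-split events just wrote.
    var configDB = db.getSiblingDB("config");
    configDB.chunks.find({ ns: "db42.coll42" }).sort({ min: 1 }).forEach(function (c) {
        print(tojson(c.min) + " -->> " + tojson(c.max) +
              " on " + c.shard + " @ " + tojson(c.lastmod));
    });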
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.825-0400 m30999| 2015-07-09T14:08:32.824-0400 W SHARDING [conn263] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 26.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.849-0400 m31100| 2015-07-09T14:08:32.848-0400 I SHARDING [conn15] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.850-0400 m31100| 2015-07-09T14:08:32.849-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.852-0400 m31100| 2015-07-09T14:08:32.851-0400 W SHARDING [conn15] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.852-0400 m30999| 2015-07-09T14:08:32.851-0400 W SHARDING [conn266] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.863-0400 m31100| 2015-07-09T14:08:32.863-0400 I SHARDING [conn15] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.863-0400 m31100| 2015-07-09T14:08:32.863-0400 I SHARDING [conn37] request split points lookup for chunk db42.coll42 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.864-0400 m31100| 2015-07-09T14:08:32.863-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.865-0400 m31100| 2015-07-09T14:08:32.864-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: 
"test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.865-0400 m31100| 2015-07-09T14:08:32.864-0400 W SHARDING [conn15] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.866-0400 m30999| 2015-07-09T14:08:32.865-0400 W SHARDING [conn262] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 32.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.866-0400 m31100| 2015-07-09T14:08:32.865-0400 W SHARDING [conn37] could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db42.coll42 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.867-0400 m30999| 2015-07-09T14:08:32.866-0400 W SHARDING [conn265] splitChunk failed - cmd: { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 30.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db42.coll42 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.874-0400 m31100| 2015-07-09T14:08:32.873-0400 I SHARDING [conn34] distributed lock 'db42.coll42/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.875-0400 m31100| 2015-07-09T14:08:32.873-0400 I COMMAND [conn34] command db42.coll42 command: splitChunk { splitChunk: "db42.coll42", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a0ca4787b9985d1d33') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 398 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2912 } } } protocol:op_command 166ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.876-0400 m30999| 2015-07-09T14:08:32.874-0400 I SHARDING [conn264] ChunkManager: time to load chunks for db42.coll42: 0ms sequenceNumber: 186 version: 1|3||559eb8a0ca4787b9985d1d33 based on: 1|0||559eb8a0ca4787b9985d1d33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.876-0400 m30999| 2015-07-09T14:08:32.874-0400 I SHARDING [conn264] autosplitted db42.coll42 shard: ns: db42.coll42, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { j: MinKey }, max: { j: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.965-0400 m30999| 2015-07-09T14:08:32.965-0400 I NETWORK [conn262] end connection 127.0.0.1:63513 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.979-0400 m30998| 2015-07-09T14:08:32.978-0400 I NETWORK [conn264] end connection 127.0.0.1:63516 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.980-0400 m30999| 2015-07-09T14:08:32.980-0400 I NETWORK [conn263] end connection 127.0.0.1:63518 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.986-0400 m30998| 2015-07-09T14:08:32.985-0400 I NETWORK [conn262] end connection 127.0.0.1:63514 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.990-0400 m30998| 2015-07-09T14:08:32.990-0400 I NETWORK [conn265] end connection 127.0.0.1:63517 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:32.991-0400 m30999| 2015-07-09T14:08:32.990-0400 I NETWORK [conn266] end connection 127.0.0.1:63522 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.015-0400 m30998| 2015-07-09T14:08:33.015-0400 I NETWORK [conn263] end connection 127.0.0.1:63515 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.026-0400 m30998| 2015-07-09T14:08:33.025-0400 I NETWORK [conn266] end connection 127.0.0.1:63520 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.030-0400 m30999| 2015-07-09T14:08:33.029-0400 I NETWORK [conn265] end connection 127.0.0.1:63521 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.138-0400 m30999| 2015-07-09T14:08:33.137-0400 I NETWORK [conn264] end connection 127.0.0.1:63519 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.138-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.138-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.138-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.138-0400 jstests/concurrency/fsm_workloads/explain_remove.js: Workload completed in 632 ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:08:33.138-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.138-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.138-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.139-0400 m30999| 2015-07-09T14:08:33.138-0400 I COMMAND [conn1] DROP: db42.coll42 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.139-0400 m30999| 2015-07-09T14:08:33.138-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:33.138-0400-559eb8a1ca4787b9985d1d35", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465313138), what: "dropCollection.start", ns: "db42.coll42", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.197-0400 m30999| 2015-07-09T14:08:33.196-0400 I SHARDING [conn1] distributed lock 'db42.coll42/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a1ca4787b9985d1d36 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.198-0400 m31100| 2015-07-09T14:08:33.197-0400 I COMMAND [conn34] CMD: drop db42.coll42 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.200-0400 m31200| 2015-07-09T14:08:33.200-0400 I COMMAND [conn84] CMD: drop db42.coll42 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.203-0400 m31102| 2015-07-09T14:08:33.203-0400 I COMMAND [repl writer worker 7] CMD: drop db42.coll42 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.203-0400 m31101| 2015-07-09T14:08:33.203-0400 I COMMAND [repl writer worker 13] CMD: drop db42.coll42 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.254-0400 m31100| 2015-07-09T14:08:33.254-0400 I SHARDING [conn34] remotely refreshing metadata for db42.coll42 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eb8a0ca4787b9985d1d33, current metadata version is 1|3||559eb8a0ca4787b9985d1d33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.256-0400 m31100| 2015-07-09T14:08:33.256-0400 W SHARDING [conn34] no chunks found when reloading db42.coll42, previous version was 0|0||559eb8a0ca4787b9985d1d33, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.257-0400 m31100| 2015-07-09T14:08:33.256-0400 I SHARDING [conn34] dropping metadata for db42.coll42 at shard version 1|3||559eb8a0ca4787b9985d1d33, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.259-0400 m30999| 2015-07-09T14:08:33.258-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:33.258-0400-559eb8a1ca4787b9985d1d37", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465313258), what: "dropCollection", ns: "db42.coll42", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.313-0400 m30999| 2015-07-09T14:08:33.313-0400 I SHARDING [conn1] distributed lock 'db42.coll42/bs-osx108-8:30999:1436464534:16807' unlocked. 
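The teardown above shows what a single drop through mongos fans out into: a "dropCollection.start" metadata event, the collection's distributed lock, CMD: drop on each shard primary (m31100, m31200), replicated drops on the secondaries (m31101/m31102/m31201/m31202), and finally the shard-version metadata being cleared ("no chunks found ... this is a drop"). The client side is one call; a sketch against this fixture's first mongos:

    // One drop() through mongos drives the whole distributed teardown above.
    var mongos = new Mongo("127.0.0.1:30999");
    mongos.getDB("db42").getCollection("coll42").drop();   // returns true on success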
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.371-0400 m30999| 2015-07-09T14:08:33.371-0400 I COMMAND [conn1] DROP DATABASE: db42 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.372-0400 m30999| 2015-07-09T14:08:33.371-0400 I SHARDING [conn1] DBConfig::dropDatabase: db42 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.372-0400 m30999| 2015-07-09T14:08:33.371-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:33.371-0400-559eb8a1ca4787b9985d1d38", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465313371), what: "dropDatabase.start", ns: "db42", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.478-0400 m30999| 2015-07-09T14:08:33.478-0400 I SHARDING [conn1] DBConfig::dropDatabase: db42 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.479-0400 m31100| 2015-07-09T14:08:33.478-0400 I COMMAND [conn28] dropDatabase db42 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.479-0400 m31100| 2015-07-09T14:08:33.478-0400 I COMMAND [conn28] dropDatabase db42 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.479-0400 m30999| 2015-07-09T14:08:33.479-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:33.479-0400-559eb8a1ca4787b9985d1d39", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465313479), what: "dropDatabase", ns: "db42", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.480-0400 m31101| 2015-07-09T14:08:33.480-0400 I COMMAND [repl writer worker 1] dropDatabase db42 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.480-0400 m31101| 2015-07-09T14:08:33.480-0400 I COMMAND [repl writer worker 1] dropDatabase db42 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.480-0400 m31102| 2015-07-09T14:08:33.480-0400 I COMMAND [repl writer worker 15] dropDatabase db42 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.480-0400 m31102| 2015-07-09T14:08:33.480-0400 I COMMAND [repl writer worker 15] dropDatabase db42 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.577-0400 m31100| 2015-07-09T14:08:33.576-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.580-0400 m31102| 2015-07-09T14:08:33.580-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.581-0400 m31101| 2015-07-09T14:08:33.580-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.618-0400 m31200| 2015-07-09T14:08:33.618-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.622-0400 m31202| 2015-07-09T14:08:33.621-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.622-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.622-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.623-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.623-0400 jstests/concurrency/fsm_workloads/update_simple.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.623-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.623-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.623-0400 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:08:33.623-0400 m31201| 2015-07-09T14:08:33.623-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.635-0400 m30999| 2015-07-09T14:08:33.634-0400 I SHARDING [conn1] distributed lock 'db43/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a1ca4787b9985d1d3a [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.639-0400 m30999| 2015-07-09T14:08:33.639-0400 I SHARDING [conn1] Placing [db43] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.639-0400 m30999| 2015-07-09T14:08:33.639-0400 I SHARDING [conn1] Enabling sharding for database [db43] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.694-0400 m30999| 2015-07-09T14:08:33.693-0400 I SHARDING [conn1] distributed lock 'db43/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.718-0400 m31100| 2015-07-09T14:08:33.717-0400 I INDEX [conn145] build index on: db43.coll43 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.718-0400 m31100| 2015-07-09T14:08:33.717-0400 I INDEX [conn145] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.727-0400 m31100| 2015-07-09T14:08:33.726-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.729-0400 m30999| 2015-07-09T14:08:33.728-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db43.coll43", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.731-0400 m30999| 2015-07-09T14:08:33.731-0400 I SHARDING [conn1] distributed lock 'db43.coll43/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a1ca4787b9985d1d3b [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.733-0400 m30999| 2015-07-09T14:08:33.732-0400 I SHARDING [conn1] enable sharding on: db43.coll43 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.733-0400 m30999| 2015-07-09T14:08:33.732-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:33.732-0400-559eb8a1ca4787b9985d1d3c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465313732), what: "shardCollection.start", ns: "db43.coll43", details: { shardKey: { _id: "hashed" }, collection: "db43.coll43", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.740-0400 m31101| 2015-07-09T14:08:33.739-0400 I INDEX [repl writer worker 5] build index on: db43.coll43 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.740-0400 m31101| 2015-07-09T14:08:33.739-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.743-0400 m31101| 2015-07-09T14:08:33.743-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.743-0400 m31102| 2015-07-09T14:08:33.743-0400 I INDEX [repl writer worker 6] build index on: db43.coll43 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.744-0400 m31102| 2015-07-09T14:08:33.743-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.749-0400 m31102| 2015-07-09T14:08:33.748-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.786-0400 m30999| 2015-07-09T14:08:33.785-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db43.coll43 using new epoch 559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.897-0400 m30999| 2015-07-09T14:08:33.896-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db43.coll43: 1ms sequenceNumber: 187 version: 1|1||559eb8a1ca4787b9985d1d3d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.955-0400 m30999| 2015-07-09T14:08:33.954-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db43.coll43: 0ms sequenceNumber: 188 version: 1|1||559eb8a1ca4787b9985d1d3d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.958-0400 m31100| 2015-07-09T14:08:33.957-0400 I SHARDING [conn47] remotely refreshing metadata for db43.coll43 with requested shard version 1|1||559eb8a1ca4787b9985d1d3d, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.960-0400 m31100| 2015-07-09T14:08:33.960-0400 I SHARDING [conn47] collection db43.coll43 was previously unsharded, new metadata loaded with shard version 1|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.961-0400 m31100| 2015-07-09T14:08:33.960-0400 I SHARDING [conn47] collection version was loaded at version 1|1||559eb8a1ca4787b9985d1d3d, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:33.961-0400 m30999| 2015-07-09T14:08:33.960-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:33.960-0400-559eb8a1ca4787b9985d1d3e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465313960), what: "shardCollection", ns: "db43.coll43", details: { version: "1|1||559eb8a1ca4787b9985d1d3d" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.014-0400 m30999| 2015-07-09T14:08:34.014-0400 I SHARDING [conn1] distributed lock 'db43.coll43/bs-osx108-8:30999:1436464534:16807' unlocked. 
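The db43 setup for update_simple.js is the standard hashed-shard-key bootstrap: enable sharding on the database, build the { _id: "hashed" } index on the primary shard, then shardCollection pre-creates two chunks (numChunks: 2 in the metadata event). The moveChunk and splitChunk entries that follow are mongos balancing that initial layout: one chunk moves to test-rs1, then each shard's remaining chunk is split near the midpoint of its hashed half-range (the ±4611686018427387902 split keys in the log). The shell-helper equivalent of the setup, with names taken from the log:

    // What the harness's db43 setup amounts to at the shell level.
    sh.enableSharding("db43");
    sh.shardCollection("db43.coll43", { _id: "hashed" });  // pre-splits into 2 chunks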
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.015-0400 m30999| 2015-07-09T14:08:34.015-0400 I SHARDING [conn1] moving chunk ns: db43.coll43 moving ( ns: db43.coll43, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.016-0400 m31100| 2015-07-09T14:08:34.015-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.017-0400 m31100| 2015-07-09T14:08:34.016-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb8a1ca4787b9985d1d3d') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.020-0400 m31100| 2015-07-09T14:08:34.020-0400 I SHARDING [conn34] distributed lock 'db43.coll43/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb8a2792e00bb672749d5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.020-0400 m31100| 2015-07-09T14:08:34.020-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:34.020-0400-559eb8a2792e00bb672749d6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465314020), what: "moveChunk.start", ns: "db43.coll43", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.074-0400 m31100| 2015-07-09T14:08:34.073-0400 I SHARDING [conn34] remotely refreshing metadata for db43.coll43 based on current shard version 1|1||559eb8a1ca4787b9985d1d3d, current metadata version is 1|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.076-0400 m31100| 2015-07-09T14:08:34.075-0400 I SHARDING [conn34] metadata of collection db43.coll43 already up to date (shard version : 1|1||559eb8a1ca4787b9985d1d3d, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.076-0400 m31100| 2015-07-09T14:08:34.075-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.076-0400 m31100| 2015-07-09T14:08:34.076-0400 I SHARDING [conn34] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.077-0400 m31200| 2015-07-09T14:08:34.076-0400 I SHARDING [conn16] remotely refreshing metadata for db43.coll43, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.079-0400 m31200| 2015-07-09T14:08:34.078-0400 I SHARDING [conn16] collection db43.coll43 was previously unsharded, new metadata loaded with shard version 0|0||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.079-0400 m31200| 2015-07-09T14:08:34.078-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb8a1ca4787b9985d1d3d, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.080-0400 m31200| 2015-07-09T14:08:34.079-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db43.coll43 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.082-0400 m31100| 2015-07-09T14:08:34.081-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.085-0400 m31100| 2015-07-09T14:08:34.084-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.090-0400 m31100| 2015-07-09T14:08:34.090-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.096-0400 m31200| 2015-07-09T14:08:34.095-0400 I INDEX [migrateThread] build index on: db43.coll43 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.096-0400 m31200| 2015-07-09T14:08:34.095-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.101-0400 m31100| 2015-07-09T14:08:34.100-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.107-0400 m31200| 2015-07-09T14:08:34.106-0400 I INDEX [migrateThread] build index on: db43.coll43 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.107-0400 m31200| 2015-07-09T14:08:34.106-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.118-0400 m31100| 2015-07-09T14:08:34.117-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.126-0400 m31200| 2015-07-09T14:08:34.126-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.128-0400 m31200| 2015-07-09T14:08:34.128-0400 I SHARDING [migrateThread] Deleter starting delete for: db43.coll43 from { _id: 0 } -> { _id: MaxKey }, with opId: 72064 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.129-0400 m31200| 2015-07-09T14:08:34.128-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db43.coll43 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.137-0400 m31201| 2015-07-09T14:08:34.137-0400 I INDEX [repl writer worker 6] build index on: db43.coll43 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.138-0400 m31201| 2015-07-09T14:08:34.137-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.144-0400 m31202| 2015-07-09T14:08:34.143-0400 I INDEX [repl writer worker 3] build index on: db43.coll43 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.144-0400 m31202| 2015-07-09T14:08:34.144-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.147-0400 m31201| 2015-07-09T14:08:34.147-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.151-0400 m31100| 2015-07-09T14:08:34.150-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "clone", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.152-0400 m31200| 2015-07-09T14:08:34.150-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.152-0400 m31200| 2015-07-09T14:08:34.151-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db43.coll43' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.153-0400 m31202| 2015-07-09T14:08:34.153-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.217-0400 m31100| 2015-07-09T14:08:34.217-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.217-0400 m31100| 2015-07-09T14:08:34.217-0400 I SHARDING [conn34] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.218-0400 m31100| 2015-07-09T14:08:34.218-0400 I SHARDING [conn34] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.218-0400 m31100| 2015-07-09T14:08:34.218-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.222-0400 m31200| 2015-07-09T14:08:34.221-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db43.coll43' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.223-0400 m31200| 2015-07-09T14:08:34.222-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:34.222-0400-559eb8a2d5a107a5b9c0db3a", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465314222), what: "moveChunk.to", ns: "db43.coll43", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 48, step 2 of 5: 21, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 71, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.269-0400 m31100| 2015-07-09T14:08:34.268-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.269-0400 m31100| 2015-07-09T14:08:34.268-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559eb8a1ca4787b9985d1d3d through { _id: MinKey } -> { _id: 0 } for collection 'db43.coll43' [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.289-0400 m31100| 2015-07-09T14:08:34.288-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:34.288-0400-559eb8a2792e00bb672749d7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465314288), what: "moveChunk.commit", ns: "db43.coll43", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.342-0400 m31100| 2015-07-09T14:08:34.342-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.343-0400 m31100| 2015-07-09T14:08:34.342-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.343-0400 m31100| 2015-07-09T14:08:34.343-0400 I SHARDING [conn34] Deleter starting delete for: db43.coll43 from { _id: 0 } -> { _id: MaxKey }, with opId: 76897 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:08:34.343-0400 m31100| 2015-07-09T14:08:34.343-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db43.coll43 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.343-0400 m31100| 2015-07-09T14:08:34.343-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.344-0400 m31100| 2015-07-09T14:08:34.344-0400 I SHARDING [conn34] distributed lock 'db43.coll43/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.345-0400 m31100| 2015-07-09T14:08:34.344-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:34.344-0400-559eb8a2792e00bb672749d8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465314344), what: "moveChunk.from", ns: "db43.coll43", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 137, step 5 of 6: 125, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.398-0400 m31100| 2015-07-09T14:08:34.397-0400 I COMMAND [conn34] command db43.coll43 command: moveChunk { moveChunk: "db43.coll43", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb8a1ca4787b9985d1d3d') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 381ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.401-0400 m30999| 2015-07-09T14:08:34.400-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db43.coll43: 1ms sequenceNumber: 189 version: 2|1||559eb8a1ca4787b9985d1d3d based on: 1|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.403-0400 m31100| 2015-07-09T14:08:34.402-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db43.coll43", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a1ca4787b9985d1d3d') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.406-0400 m31100| 2015-07-09T14:08:34.406-0400 I SHARDING [conn34] distributed lock 'db43.coll43/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb8a2792e00bb672749d9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.406-0400 m31100| 2015-07-09T14:08:34.406-0400 I SHARDING [conn34] remotely refreshing metadata for db43.coll43 based on current shard version 2|0||559eb8a1ca4787b9985d1d3d, current metadata version is 2|0||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.409-0400 m31100| 2015-07-09T14:08:34.408-0400 I SHARDING [conn34] updating metadata for db43.coll43 from shard version 2|0||559eb8a1ca4787b9985d1d3d to shard version 2|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.409-0400 m31100| 2015-07-09T14:08:34.408-0400 I 
SHARDING [conn34] collection version was loaded at version 2|1||559eb8a1ca4787b9985d1d3d, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.409-0400 m31100| 2015-07-09T14:08:34.408-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.411-0400 m31100| 2015-07-09T14:08:34.411-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:34.411-0400-559eb8a2792e00bb672749da", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465314411), what: "split", ns: "db43.coll43", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb8a1ca4787b9985d1d3d') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb8a1ca4787b9985d1d3d') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.467-0400 m31100| 2015-07-09T14:08:34.466-0400 I SHARDING [conn34] distributed lock 'db43.coll43/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.470-0400 m30999| 2015-07-09T14:08:34.469-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db43.coll43: 0ms sequenceNumber: 190 version: 2|3||559eb8a1ca4787b9985d1d3d based on: 2|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.470-0400 m31200| 2015-07-09T14:08:34.470-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db43.coll43", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a1ca4787b9985d1d3d') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.474-0400 m31200| 2015-07-09T14:08:34.474-0400 I SHARDING [conn84] distributed lock 'db43.coll43/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb8a2d5a107a5b9c0db3b [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.475-0400 m31200| 2015-07-09T14:08:34.474-0400 I SHARDING [conn84] remotely refreshing metadata for db43.coll43 based on current shard version 0|0||559eb8a1ca4787b9985d1d3d, current metadata version is 1|1||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.476-0400 m31200| 2015-07-09T14:08:34.476-0400 I SHARDING [conn84] updating metadata for db43.coll43 from shard version 0|0||559eb8a1ca4787b9985d1d3d to shard version 2|0||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.476-0400 m31200| 2015-07-09T14:08:34.476-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb8a1ca4787b9985d1d3d, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.477-0400 m31200| 2015-07-09T14:08:34.476-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.478-0400 m31200| 2015-07-09T14:08:34.478-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:34.478-0400-559eb8a2d5a107a5b9c0db3c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436465314478), what: "split", ns: "db43.coll43", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb8a1ca4787b9985d1d3d') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb8a1ca4787b9985d1d3d') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.534-0400 m31200| 2015-07-09T14:08:34.534-0400 I SHARDING [conn84] distributed lock 'db43.coll43/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.536-0400 m30999| 2015-07-09T14:08:34.536-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:34.532-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.537-0400 m30999| 2015-07-09T14:08:34.536-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db43.coll43: 1ms sequenceNumber: 191 version: 2|5||559eb8a1ca4787b9985d1d3d based on: 2|3||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.546-0400 m31200| 2015-07-09T14:08:34.545-0400 I INDEX [conn83] build index on: db43.coll43 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.546-0400 m31100| 2015-07-09T14:08:34.545-0400 I INDEX [conn47] build index on: db43.coll43 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.547-0400 m31200| 2015-07-09T14:08:34.545-0400 I INDEX [conn83] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.547-0400 m31100| 2015-07-09T14:08:34.545-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.554-0400 m31200| 2015-07-09T14:08:34.553-0400 I INDEX [conn83] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.557-0400 m31100| 2015-07-09T14:08:34.557-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.559-0400 m31201| 2015-07-09T14:08:34.558-0400 I INDEX [repl writer worker 15] build index on: db43.coll43 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.560-0400 m31201| 2015-07-09T14:08:34.558-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.564-0400 m31201| 2015-07-09T14:08:34.563-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.568-0400 m31202| 2015-07-09T14:08:34.567-0400 I INDEX [repl writer worker 14] build index on: db43.coll43 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.568-0400 m31202| 2015-07-09T14:08:34.567-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.574-0400 m31101| 2015-07-09T14:08:34.572-0400 I INDEX [repl writer worker 7] build index on: db43.coll43 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.574-0400 m31101| 2015-07-09T14:08:34.572-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.574-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.579-0400 m31102| 2015-07-09T14:08:34.577-0400 I INDEX [repl writer worker 4] build index on: db43.coll43 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db43.coll43" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.579-0400 m31102| 2015-07-09T14:08:34.577-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.653-0400 m31202| 2015-07-09T14:08:34.646-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.662-0400 m31101| 2015-07-09T14:08:34.656-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.684-0400 m31102| 2015-07-09T14:08:34.679-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.771-0400 m30999| 2015-07-09T14:08:34.771-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63523 #267 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.772-0400 m30998| 2015-07-09T14:08:34.771-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63524 #267 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.782-0400 m30999| 2015-07-09T14:08:34.781-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63525 #268 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.784-0400 m30998| 2015-07-09T14:08:34.784-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63527 #268 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.787-0400 m30999| 2015-07-09T14:08:34.786-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63526 #269 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.794-0400 m30998| 2015-07-09T14:08:34.794-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63528 #269 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.805-0400 m30998| 2015-07-09T14:08:34.805-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63530 #270 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.805-0400 m30999| 2015-07-09T14:08:34.805-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63529 #270 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.809-0400 m30998| 2015-07-09T14:08:34.809-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63531 #271 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.809-0400 m30999| 2015-07-09T14:08:34.809-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63532 #271 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.813-0400 m30998| 2015-07-09T14:08:34.812-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63533 #272 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.813-0400 m30999| 2015-07-09T14:08:34.813-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63536 #272 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.815-0400 m30998| 2015-07-09T14:08:34.814-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63534 #273 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.817-0400 m30999| 2015-07-09T14:08:34.815-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63537 #273 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.818-0400 m30998| 2015-07-09T14:08:34.817-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63535 #274 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.818-0400 m30998| 2015-07-09T14:08:34.817-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63538 #275 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.818-0400 m30998| 2015-07-09T14:08:34.818-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63540 #276 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.822-0400 m30999| 2015-07-09T14:08:34.822-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63539 #274 (9 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.823-0400 m30999| 2015-07-09T14:08:34.823-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63541 #275 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.824-0400 m30999| 2015-07-09T14:08:34.823-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63542 #276 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.830-0400 setting random seed: 7145577990449 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.830-0400 setting random seed: 6272214329801 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.830-0400 setting random seed: 7715397877618 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.831-0400 setting random seed: 5171776390634 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.831-0400 setting random seed: 2045837868936 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.831-0400 setting random seed: 4397079222835 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.832-0400 setting random seed: 3041187007911 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.834-0400 setting random seed: 2692189519293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.835-0400 setting random seed: 1710532610304 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.837-0400 setting random seed: 3079880201257 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.837-0400 setting random seed: 7629271834157 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.838-0400 setting random seed: 5417079986073 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.839-0400 setting random seed: 5729341413825 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.841-0400 setting random seed: 3230065768584 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.842-0400 setting random seed: 4736959883011 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.842-0400 m30998| 2015-07-09T14:08:34.840-0400 I SHARDING [conn267] ChunkManager: time to load chunks for db43.coll43: 0ms sequenceNumber: 54 version: 2|5||559eb8a1ca4787b9985d1d3d based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.845-0400 setting random seed: 3546566641889 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.845-0400 setting random seed: 8921594773419 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.845-0400 setting random seed: 4084790851920 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.855-0400 setting random seed: 7106290059164 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:34.870-0400 setting random seed: 5243426677770 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.058-0400 m31200| 2015-07-09T14:08:35.057-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63543 #144 (87 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.106-0400 m30999| 2015-07-09T14:08:35.106-0400 I NETWORK [conn275] end connection 127.0.0.1:63541 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.134-0400 m30998| 2015-07-09T14:08:35.132-0400 I NETWORK [conn267] end connection 127.0.0.1:63524 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.147-0400 m30999| 2015-07-09T14:08:35.146-0400 I NETWORK [conn269] end connection 127.0.0.1:63526 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.158-0400 m30998| 
2015-07-09T14:08:35.158-0400 I NETWORK [conn274] end connection 127.0.0.1:63535 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.174-0400 m30999| 2015-07-09T14:08:35.174-0400 I NETWORK [conn276] end connection 127.0.0.1:63542 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.177-0400 m30998| 2015-07-09T14:08:35.176-0400 I NETWORK [conn272] end connection 127.0.0.1:63533 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.188-0400 m30998| 2015-07-09T14:08:35.188-0400 I NETWORK [conn273] end connection 127.0.0.1:63534 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.190-0400 m30998| 2015-07-09T14:08:35.189-0400 I NETWORK [conn275] end connection 127.0.0.1:63538 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.208-0400 m30998| 2015-07-09T14:08:35.208-0400 I NETWORK [conn276] end connection 127.0.0.1:63540 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.211-0400 m30999| 2015-07-09T14:08:35.211-0400 I NETWORK [conn274] end connection 127.0.0.1:63539 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.225-0400 m30999| 2015-07-09T14:08:35.224-0400 I NETWORK [conn272] end connection 127.0.0.1:63536 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.232-0400 m30999| 2015-07-09T14:08:35.231-0400 I NETWORK [conn268] end connection 127.0.0.1:63525 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.237-0400 m30998| 2015-07-09T14:08:35.237-0400 I NETWORK [conn269] end connection 127.0.0.1:63528 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.253-0400 m30999| 2015-07-09T14:08:35.250-0400 I NETWORK [conn267] end connection 127.0.0.1:63523 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.254-0400 m30999| 2015-07-09T14:08:35.254-0400 I NETWORK [conn270] end connection 127.0.0.1:63529 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.279-0400 m30999| 2015-07-09T14:08:35.273-0400 I NETWORK [conn271] end connection 127.0.0.1:63532 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.283-0400 m30998| 2015-07-09T14:08:35.282-0400 I NETWORK [conn271] end connection 127.0.0.1:63531 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.299-0400 m30998| 2015-07-09T14:08:35.299-0400 I NETWORK [conn270] end connection 127.0.0.1:63530 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.304-0400 m30998| 2015-07-09T14:08:35.304-0400 I NETWORK [conn268] end connection 127.0.0.1:63527 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.322-0400 m30999| 2015-07-09T14:08:35.321-0400 I NETWORK [conn273] end connection 127.0.0.1:63537 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 jstests/concurrency/fsm_workloads/update_simple.js: Workload completed in 767 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 
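The banner above closes the update_simple.js workload. Every file under jstests/concurrency/fsm_workloads/ is driven by the FSM framework and exports a $config object describing its threads, states, and transitions. A minimal sketch of that shape — illustrative state names only, not the actual update_simple.js contents:

'use strict';

// Hypothetical workload skeleton in the style of the FSM framework.
var $config = (function() {

    var states = {
        // Each state is a function the runner invokes with the worker's db
        // handle and collection name.
        init: function init(db, collName) {
            db[collName].insert({tid: this.tid, value: 0});
        },
        update: function update(db, collName) {
            db[collName].update({tid: this.tid}, {$inc: {value: 1}});
        }
    };

    // Probabilities for moving between states.
    var transitions = {
        init: {update: 1},
        update: {update: 1}
    };

    return {
        threadCount: 20,   // matches "Using 20 threads (requested 20)" above
        iterations: 50,    // illustrative
        states: states,
        transitions: transitions
    };
})();

The runner spawns threadCount shell threads and walks each through the transition table for the configured number of iterations; the DROP and dropDatabase lines that follow are the harness cleaning up db43 between workloads.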
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.342-0400 m30999| 2015-07-09T14:08:35.342-0400 I COMMAND [conn1] DROP: db43.coll43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.343-0400 m30999| 2015-07-09T14:08:35.342-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:35.342-0400-559eb8a3ca4787b9985d1d3f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465315342), what: "dropCollection.start", ns: "db43.coll43", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.401-0400 m30999| 2015-07-09T14:08:35.400-0400 I SHARDING [conn1] distributed lock 'db43.coll43/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a3ca4787b9985d1d40 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.401-0400 m31100| 2015-07-09T14:08:35.401-0400 I COMMAND [conn40] CMD: drop db43.coll43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.405-0400 m31200| 2015-07-09T14:08:35.404-0400 I COMMAND [conn18] CMD: drop db43.coll43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.406-0400 m31101| 2015-07-09T14:08:35.405-0400 I COMMAND [repl writer worker 13] CMD: drop db43.coll43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.406-0400 m31102| 2015-07-09T14:08:35.406-0400 I COMMAND [repl writer worker 15] CMD: drop db43.coll43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.409-0400 m31202| 2015-07-09T14:08:35.408-0400 I COMMAND [repl writer worker 3] CMD: drop db43.coll43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.409-0400 m31201| 2015-07-09T14:08:35.408-0400 I COMMAND [repl writer worker 1] CMD: drop db43.coll43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.462-0400 m31100| 2015-07-09T14:08:35.462-0400 I SHARDING [conn40] remotely refreshing metadata for db43.coll43 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb8a1ca4787b9985d1d3d, current metadata version is 2|3||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.464-0400 m31100| 2015-07-09T14:08:35.464-0400 W SHARDING [conn40] no chunks found when reloading db43.coll43, previous version was 0|0||559eb8a1ca4787b9985d1d3d, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.465-0400 m31100| 2015-07-09T14:08:35.464-0400 I SHARDING [conn40] dropping metadata for db43.coll43 at shard version 2|3||559eb8a1ca4787b9985d1d3d, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.466-0400 m31200| 2015-07-09T14:08:35.465-0400 I SHARDING [conn18] remotely refreshing metadata for db43.coll43 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb8a1ca4787b9985d1d3d, current metadata version is 2|5||559eb8a1ca4787b9985d1d3d [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.468-0400 m31200| 2015-07-09T14:08:35.468-0400 W SHARDING [conn18] no chunks found when reloading db43.coll43, previous version was 0|0||559eb8a1ca4787b9985d1d3d, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.468-0400 m31200| 2015-07-09T14:08:35.468-0400 I SHARDING [conn18] dropping metadata for db43.coll43 at shard version 2|5||559eb8a1ca4787b9985d1d3d, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.470-0400 m30999| 2015-07-09T14:08:35.470-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:35.470-0400-559eb8a3ca4787b9985d1d41", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new 
Date(1436465315470), what: "dropCollection", ns: "db43.coll43", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.525-0400 m30999| 2015-07-09T14:08:35.524-0400 I SHARDING [conn1] distributed lock 'db43.coll43/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.582-0400 m30999| 2015-07-09T14:08:35.581-0400 I COMMAND [conn1] DROP DATABASE: db43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.582-0400 m30999| 2015-07-09T14:08:35.582-0400 I SHARDING [conn1] DBConfig::dropDatabase: db43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.583-0400 m30999| 2015-07-09T14:08:35.582-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:35.582-0400-559eb8a3ca4787b9985d1d42", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465315582), what: "dropDatabase.start", ns: "db43", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.689-0400 m30999| 2015-07-09T14:08:35.689-0400 I SHARDING [conn1] DBConfig::dropDatabase: db43 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.690-0400 m31100| 2015-07-09T14:08:35.689-0400 I COMMAND [conn28] dropDatabase db43 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.690-0400 m31100| 2015-07-09T14:08:35.690-0400 I COMMAND [conn28] dropDatabase db43 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.691-0400 m30999| 2015-07-09T14:08:35.691-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:35.691-0400-559eb8a3ca4787b9985d1d43", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465315691), what: "dropDatabase", ns: "db43", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.692-0400 m31102| 2015-07-09T14:08:35.691-0400 I COMMAND [repl writer worker 14] dropDatabase db43 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.692-0400 m31102| 2015-07-09T14:08:35.691-0400 I COMMAND [repl writer worker 14] dropDatabase db43 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.692-0400 m31101| 2015-07-09T14:08:35.691-0400 I COMMAND [repl writer worker 1] dropDatabase db43 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.692-0400 m31101| 2015-07-09T14:08:35.691-0400 I COMMAND [repl writer worker 1] dropDatabase db43 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.789-0400 m31100| 2015-07-09T14:08:35.788-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.792-0400 m31102| 2015-07-09T14:08:35.792-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.792-0400 m31101| 2015-07-09T14:08:35.792-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.843-0400 m31200| 2015-07-09T14:08:35.843-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 jstests/concurrency/fsm_workloads/remove_where.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 
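The next workload, remove_where.js, exercises deletes whose predicate is a JavaScript $where expression. A sketch of the kind of remove involved (collection name and values illustrative; the actual predicate appears in the slow-operation lines later in this section):

// $where evaluates a JS expression against every candidate document, so it
// cannot use an index and forces a collection scan (planSummary: COLLSCAN).
db.coll44.remove({ $where: 'this.x === 5 && this.tid === 3' });

// The equality parts of such a predicate could instead be written as a plain
// query document, which is able to use the tid_1 index built below:
db.coll44.remove({ tid: 3, x: 5 });

The multi-second COLLSCAN queries near the end of this section — e.g. { $where: "this.tid === 8" } scanning 1200 documents in 19535ms — show the per-document JS evaluation cost under concurrent load.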
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.846-0400 m31201| 2015-07-09T14:08:35.846-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.847-0400 m31202| 2015-07-09T14:08:35.846-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.856-0400 m30999| 2015-07-09T14:08:35.855-0400 I SHARDING [conn1] distributed lock 'db44/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a3ca4787b9985d1d44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.861-0400 m30999| 2015-07-09T14:08:35.860-0400 I SHARDING [conn1] Placing [db44] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.861-0400 m30999| 2015-07-09T14:08:35.860-0400 I SHARDING [conn1] Enabling sharding for database [db44] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.915-0400 m30999| 2015-07-09T14:08:35.915-0400 I SHARDING [conn1] distributed lock 'db44/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.948-0400 m31100| 2015-07-09T14:08:35.947-0400 I INDEX [conn70] build index on: db44.coll44 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db44.coll44" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.948-0400 m31100| 2015-07-09T14:08:35.947-0400 I INDEX [conn70] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.950-0400 m30998| 2015-07-09T14:08:35.949-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:35.947-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.975-0400 m31100| 2015-07-09T14:08:35.974-0400 I INDEX [conn70] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.976-0400 m30999| 2015-07-09T14:08:35.976-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db44.coll44", key: { tid: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.980-0400 m30999| 2015-07-09T14:08:35.979-0400 I SHARDING [conn1] distributed lock 'db44.coll44/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb8a3ca4787b9985d1d45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.982-0400 m30999| 2015-07-09T14:08:35.981-0400 I SHARDING [conn1] enable sharding on: db44.coll44 with shard key: { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.982-0400 m30999| 2015-07-09T14:08:35.981-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:35.981-0400-559eb8a3ca4787b9985d1d46", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465315981), what: "shardCollection.start", ns: "db44.coll44", details: { shardKey: { tid: 1.0 }, collection: "db44.coll44", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.987-0400 m31102| 2015-07-09T14:08:35.987-0400 I INDEX [repl writer worker 4] build index on: db44.coll44 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db44.coll44" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.988-0400 m31102| 2015-07-09T14:08:35.987-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.992-0400 m31101| 2015-07-09T14:08:35.991-0400 I INDEX [repl writer worker 5] build index on: db44.coll44 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db44.coll44" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:35.992-0400 m31101| 2015-07-09T14:08:35.991-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.015-0400 m31102| 2015-07-09T14:08:36.014-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.020-0400 m31101| 2015-07-09T14:08:36.019-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.036-0400 m30999| 2015-07-09T14:08:36.035-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db44.coll44 using new epoch 559eb8a4ca4787b9985d1d47 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.092-0400 m30999| 2015-07-09T14:08:36.092-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db44.coll44: 1ms sequenceNumber: 192 version: 1|0||559eb8a4ca4787b9985d1d47 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.149-0400 m30999| 2015-07-09T14:08:36.148-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db44.coll44: 1ms sequenceNumber: 193 version: 1|0||559eb8a4ca4787b9985d1d47 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.151-0400 m31100| 2015-07-09T14:08:36.151-0400 I SHARDING [conn55] remotely refreshing metadata for db44.coll44 with requested shard version 1|0||559eb8a4ca4787b9985d1d47, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.153-0400 m31100| 2015-07-09T14:08:36.153-0400 I SHARDING [conn55] collection db44.coll44 was previously unsharded, new metadata loaded with shard version 1|0||559eb8a4ca4787b9985d1d47 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.154-0400 m31100| 2015-07-09T14:08:36.153-0400 I SHARDING [conn55] collection version was loaded at version 1|0||559eb8a4ca4787b9985d1d47, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.154-0400 m30999| 2015-07-09T14:08:36.153-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.153-0400-559eb8a4ca4787b9985d1d48", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465316153), what: "shardCollection", ns: "db44.coll44", details: { version: "1|0||559eb8a4ca4787b9985d1d47" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.209-0400 m30999| 2015-07-09T14:08:36.209-0400 I SHARDING [conn1] distributed lock 'db44.coll44/bs-osx108-8:30999:1436464534:16807' unlocked. 
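The sequence above — distributed lock on db44, "Placing [db44] on: test-rs0", the tid_1 index build, the shardcollection command, and one initial chunk created under epoch 559eb8a4ca4787b9985d1d47 — is the standard mongos flow for a newly sharded collection. A sketch of the shell commands behind it (run against a mongos; database, collection, and key names taken from the log):

// Enable sharding for the database; mongos logs
// "Enabling sharding for database [db44]".
sh.enableSharding('db44');

// The shard key needs a supporting index; this is the tid_1 build
// seen on test-rs0 and its secondaries above.
db.getSiblingDB('db44').coll44.createIndex({tid: 1});

// Shard the collection; mongos logs "CMD: shardcollection" and creates a
// single chunk covering (MinKey, MaxKey) with a fresh epoch.
sh.shardCollection('db44.coll44', {tid: 1});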
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.210-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.298-0400 m30998| 2015-07-09T14:08:36.293-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63545 #277 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.316-0400 m30999| 2015-07-09T14:08:36.308-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63546 #277 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.329-0400 m30998| 2015-07-09T14:08:36.321-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63547 #278 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.336-0400 m30999| 2015-07-09T14:08:36.332-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63548 #278 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.337-0400 m30999| 2015-07-09T14:08:36.337-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63549 #279 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.337-0400 m30998| 2015-07-09T14:08:36.337-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63551 #279 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.337-0400 m30999| 2015-07-09T14:08:36.337-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63550 #280 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.338-0400 m30999| 2015-07-09T14:08:36.337-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63552 #281 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.345-0400 m30998| 2015-07-09T14:08:36.341-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63553 #280 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.345-0400 m30998| 2015-07-09T14:08:36.345-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63554 #281 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.353-0400 setting random seed: 6899361591786 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.353-0400 setting random seed: 9645949690602 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.354-0400 setting random seed: 5778380609117 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.354-0400 setting random seed: 4488354753702 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.359-0400 setting random seed: 8941491395235 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.359-0400 setting random seed: 8631459595635 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.362-0400 setting random seed: 9476525448262 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.363-0400 m30998| 2015-07-09T14:08:36.362-0400 I SHARDING [conn277] ChunkManager: time to load chunks for db44.coll44: 0ms sequenceNumber: 55 version: 1|0||559eb8a4ca4787b9985d1d47 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.365-0400 setting random seed: 4863368324004 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.365-0400 setting random seed: 7570327143184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.366-0400 setting random seed: 1963283116929 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.459-0400 m31100| 2015-07-09T14:08:36.459-0400 I SHARDING [conn40] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : 
MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.461-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.461-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.461-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.461-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.462-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.462-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.462-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.462-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.462-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.463-0400 m31100| 2015-07-09T14:08:36.460-0400 W SHARDING [conn40] possible low cardinality key detected in db44.coll44 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.463-0400 m31100| 2015-07-09T14:08:36.461-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.463-0400 m31100| 2015-07-09T14:08:36.462-0400 I SHARDING [conn40] distributed lock 'db44.coll44/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb8a4792e00bb672749dc [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.463-0400 m31100| 2015-07-09T14:08:36.462-0400 I SHARDING [conn40] remotely refreshing metadata for db44.coll44 based on current shard version 1|0||559eb8a4ca4787b9985d1d47, current metadata version is 1|0||559eb8a4ca4787b9985d1d47 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.466-0400 m31100| 2015-07-09T14:08:36.465-0400 I SHARDING [conn40] metadata of collection db44.coll44 already up to date (shard version : 1|0||559eb8a4ca4787b9985d1d47, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.466-0400 m31100| 2015-07-09T14:08:36.465-0400 I SHARDING [conn40] splitChunk accepted at version 1|0||559eb8a4ca4787b9985d1d47 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.467-0400 m31100| 2015-07-09T14:08:36.465-0400 I 
SHARDING [conn39] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.467-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.467-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.467-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.467-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.467-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.468-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.468-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.468-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.468-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.468-0400 m31100| 2015-07-09T14:08:36.466-0400 W SHARDING [conn39] possible low cardinality key detected in db44.coll44 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.468-0400 m31100| 2015-07-09T14:08:36.467-0400 I SHARDING [conn132] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.469-0400 m31100| 2015-07-09T14:08:36.468-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.472-0400 m31100| 2015-07-09T14:08:36.471-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.473-0400 m31100| 2015-07-09T14:08:36.472-0400 W SHARDING [conn39] could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting 
chunk [{ : MinKey }, { : MaxKey }) in db44.coll44 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.473-0400 m30998| 2015-07-09T14:08:36.472-0400 W SHARDING [conn277] splitChunk failed - cmd: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.473-0400 m31100| 2015-07-09T14:08:36.473-0400 W SHARDING [conn132] could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db44.coll44 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.474-0400 m30998| 2015-07-09T14:08:36.473-0400 W SHARDING [conn279] splitChunk failed - cmd: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.479-0400 m31100| 2015-07-09T14:08:36.478-0400 I COMMAND [conn16] command db44.$cmd command: insert { insert: "coll44", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 2633 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.479-0400 m31100| 2015-07-09T14:08:36.479-0400 I COMMAND [conn25] command db44.$cmd command: insert { insert: "coll44", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 1629 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.480-0400 m30998| 2015-07-09T14:08:36.479-0400 I SHARDING [conn279] ChunkManager: time to load chunks for db44.coll44: 0ms sequenceNumber: 56 version: 1|10||559eb8a4ca4787b9985d1d47 based on: 1|0||559eb8a4ca4787b9985d1d47 [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.499-0400 m31100| 
2015-07-09T14:08:36.494-0400 I SHARDING [conn132] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.500-0400 m31100| 2015-07-09T14:08:36.494-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.494-0400-559eb8a4792e00bb672749dd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316494), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 10, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.500-0400 m31100| 2015-07-09T14:08:36.494-0400 I SHARDING [conn38] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.500-0400 m31100| 2015-07-09T14:08:36.496-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.500-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.500-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.500-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.501-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.501-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.501-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.501-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.502-0400 m31100| 2015-07-09T14:08:36.498-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.503-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.503-0400 m31100| 2015-07-09T14:08:36.498-0400 W SHARDING [conn132] possible low cardinality key detected in db44.coll44 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.503-0400 m31100| 2015-07-09T14:08:36.499-0400 I SHARDING [conn132] received 
splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.504-0400 m31100| 2015-07-09T14:08:36.500-0400 I COMMAND [conn69] command db44.$cmd command: insert { insert: "coll44", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 2581 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.504-0400 m31100| 2015-07-09T14:08:36.500-0400 W SHARDING [conn38] could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db44.coll44 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.504-0400 m30999| 2015-07-09T14:08:36.500-0400 W SHARDING [conn281] splitChunk failed - cmd: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.505-0400 m31100| 2015-07-09T14:08:36.500-0400 I SHARDING [conn15] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.505-0400 m31100| 2015-07-09T14:08:36.502-0400 W SHARDING [conn132] could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db44.coll44 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.505-0400 m30998| 2015-07-09T14:08:36.502-0400 W SHARDING [conn281] splitChunk failed - cmd: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.506-0400 m31100| 2015-07-09T14:08:36.502-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.506-0400 m31100| 2015-07-09T14:08:36.503-0400 W SHARDING [conn15] could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db44.coll44 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.506-0400 m30999| 2015-07-09T14:08:36.503-0400 W SHARDING [conn279] splitChunk failed - cmd: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.516-0400 m31100| 2015-07-09T14:08:36.515-0400 I COMMAND [conn147] command db44.$cmd command: insert { insert: "coll44", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 15065 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.538-0400 m31100| 2015-07-09T14:08:36.536-0400 I COMMAND [conn68] command db44.$cmd command: insert { insert: "coll44", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 108, w: 108 } }, Database: { acquireCount: { w: 108 } }, Collection: { acquireCount: { w: 8 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 12996 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 157ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.538-0400 m31100| 2015-07-09T14:08:36.537-0400 I SHARDING [conn15] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.539-0400 m31100| 2015-07-09T14:08:36.538-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.539-0400 m31100| 2015-07-09T14:08:36.538-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.539-0400 m31100| 2015-07-09T14:08:36.538-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.539-0400 m31100| 2015-07-09T14:08:36.538-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.540-0400 m31100| 2015-07-09T14:08:36.538-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.540-0400 m31100| 2015-07-09T14:08:36.539-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.540-0400 m31100| 2015-07-09T14:08:36.539-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.540-0400 m31100| 2015-07-09T14:08:36.539-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.540-0400 m31100| 2015-07-09T14:08:36.539-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.540-0400 m31100| 2015-07-09T14:08:36.539-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.541-0400 m31100| 2015-07-09T14:08:36.540-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.541-0400 m31100| 2015-07-09T14:08:36.541-0400 W SHARDING [conn15] could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db44.coll44 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.542-0400 m30999| 2015-07-09T14:08:36.541-0400 W SHARDING [conn280] splitChunk failed - cmd: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.546-0400 m31100| 2015-07-09T14:08:36.546-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.546-0400-559eb8a4792e00bb672749de", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316546), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 10, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.598-0400 m31100| 2015-07-09T14:08:36.598-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.598-0400-559eb8a4792e00bb672749df", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316598), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 10, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.608-0400 m31100| 2015-07-09T14:08:36.608-0400 I SHARDING [conn15] request split points lookup for chunk db44.coll44 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.613-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.613-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.613-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.614-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.614-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.614-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.614-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.614-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING 
[conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.614-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.614-0400 m31100| 2015-07-09T14:08:36.610-0400 W SHARDING [conn15] possible low cardinality key detected in db44.coll44 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.615-0400 m31100| 2015-07-09T14:08:36.610-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.615-0400 m31100| 2015-07-09T14:08:36.612-0400 W SHARDING [conn15] could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db44.coll44 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.615-0400 m30999| 2015-07-09T14:08:36.612-0400 W SHARDING [conn279] splitChunk failed - cmd: { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db44.coll44 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.651-0400 m31100| 2015-07-09T14:08:36.650-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.650-0400-559eb8a4792e00bb672749e0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316650), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 10, chunk: { min: { tid: 3.0 }, max: { tid: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.705-0400 m31100| 2015-07-09T14:08:36.704-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.704-0400-559eb8a4792e00bb672749e1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316704), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 10, chunk: { min: { tid: 4.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.758-0400 m31100| 2015-07-09T14:08:36.757-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.757-0400-559eb8a4792e00bb672749e2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316757), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 10, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.811-0400 m31100| 2015-07-09T14:08:36.810-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.810-0400-559eb8a4792e00bb672749e3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316810), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 10, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.852-0400 m31100| 2015-07-09T14:08:36.852-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.852-0400-559eb8a4792e00bb672749e4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316852), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 10, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.853-0400 m31100| 2015-07-09T14:08:36.852-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:36.843-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.906-0400 m31100| 2015-07-09T14:08:36.905-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.905-0400-559eb8a4792e00bb672749e5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316905), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 10, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:36.959-0400 m31100| 2015-07-09T14:08:36.958-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:08:36.958-0400-559eb8a4792e00bb672749e6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465316958), what: "multi-split", ns: "db44.coll44", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 10, of: 10, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eb8a4ca4787b9985d1d47') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:37.013-0400 m31100| 2015-07-09T14:08:37.012-0400 I SHARDING [conn40] distributed lock 'db44.coll44/bs-osx108-8:31100:1436464536:197041335' unlocked.
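The records above are mongos-triggered autosplitting: the shard receives one splitChunk request carrying every desired boundary in splitKeys, the first attempt loses the race for the distributed collection lock (the code 125 "could not acquire collection lock" failure), and the retry logs a "multi-split" metadata event per new boundary before releasing the lock; the command completion and the mongos-side "autosplitted ... into 10" summary follow below. For reference, a minimal mongo-shell sketch of requesting the same split points manually through mongos (hypothetical, not something this test actually runs):

    // Same boundaries as the splitKeys array in the logged request.
    [0, 2, 3, 4, 5, 6, 7, 8, 9].forEach(function(t) {
        sh.splitAt("db44.coll44", { tid: t });  // split the chunk owning { tid: t } exactly at that key
    });
    // Lower-level equivalent for a single boundary, also issued against mongos:
    db.adminCommand({ split: "db44.coll44", middle: { tid: 5 } });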
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:37.014-0400 m31100| 2015-07-09T14:08:37.013-0400 I COMMAND [conn40] command db44.coll44 command: splitChunk { splitChunk: "db44.coll44", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb8a4ca4787b9985d1d47') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 24677 } } } protocol:op_command 551ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:37.015-0400 m30999| 2015-07-09T14:08:37.015-0400 I SHARDING [conn277] ChunkManager: time to load chunks for db44.coll44: 0ms sequenceNumber: 194 version: 1|10||559eb8a4ca4787b9985d1d47 based on: 1|0||559eb8a4ca4787b9985d1d47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:37.015-0400 m30999| 2015-07-09T14:08:37.015-0400 I SHARDING [conn277] autosplitted db44.coll44 shard: ns: db44.coll44, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 10 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:37.273-0400 m31200| 2015-07-09T14:08:37.272-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:08:37.270-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:56.030-0400 m31100| 2015-07-09T14:08:56.030-0400 I QUERY [conn55] query db44.coll44 query: { $where: "this.tid === 8" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1200 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:776 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 1558 } }, Database: { acquireCount: { r: 779 } }, Collection: { acquireCount: { r: 779 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 4184 } } } 19535ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:08:59.337-0400 m31100| 2015-07-09T14:08:59.336-0400 I QUERY [conn46] query db44.coll44 query: { $where: "this.tid === 1" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1200 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:915 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 1836 } }, Database: { acquireCount: { r: 918 } }, Collection: { acquireCount: { r: 918 } } } 22837ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:04.542-0400 m30999| 2015-07-09T14:09:04.542-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:04.536-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.602-0400 m31100| 2015-07-09T14:09:05.601-0400 I WRITE [conn133] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" } ndeleted:12 keyUpdates:0 writeConflicts:0 numYields:1178 locks:{ Global: { acquireCount: { r: 1195, w: 1191 } }, Database: { acquireCount: { r: 2, w: 1191 } }, Collection: { acquireCount: { r: 2, w: 1179 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } 29124ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.603-0400 m31100| 2015-07-09T14:09:05.601-0400 I COMMAND [conn133] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1195, w: 1191 } }, Database: { acquireCount: { r: 2, w: 1191 } }, Collection: { acquireCount: { r: 2, w: 1179 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } protocol:op_command 29125ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.603-0400 m31100| 2015-07-09T14:09:05.602-0400 I WRITE [conn27] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } ndeleted:10 keyUpdates:0 writeConflicts:0 numYields:1151 locks:{ Global: { acquireCount: { r: 1166, w: 1162 } }, Database: { acquireCount: { r: 2, w: 1162 } }, Collection: { acquireCount: { r: 2, w: 1152 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } 29134ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.604-0400 m31100| 2015-07-09T14:09:05.603-0400 I COMMAND [conn27] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1166, w: 1162 } }, Database: { acquireCount: { r: 2, w: 1162 } }, Collection: { acquireCount: { r: 2, w: 1152 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } protocol:op_command 29134ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.838-0400 m31100| 2015-07-09T14:09:05.837-0400 I QUERY [conn46] query db44.coll44 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:2843793953393 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1102 keyUpdates:0 writeConflicts:0 numYields:270 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 542 } }, Database: { acquireCount: { r: 271 } }, Collection: { acquireCount: { r: 271 } } } 6438ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.878-0400 m31100| 2015-07-09T14:09:05.878-0400 I WRITE [conn25] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } ndeleted:16 keyUpdates:0 writeConflicts:0 numYields:1168 locks:{ Global: { acquireCount: { r: 1189, w: 1185 } }, Database: { acquireCount: { r: 2, w: 1185 } }, Collection: { acquireCount: { r: 2, w: 1169 } }, Metadata: { acquireCount: { w: 16 } }, oplog: { acquireCount: { w: 16 } } } 29284ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.879-0400 m31100| 2015-07-09T14:09:05.878-0400 I COMMAND [conn25] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1189, w: 1185 } }, Database: { acquireCount: { r: 2, w: 1185 } }, Collection: { acquireCount: { r: 2, w: 1169 } }, Metadata: { acquireCount: { w: 16 } }, oplog: { acquireCount: { w: 16 } } } protocol:op_command 29284ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.886-0400 m31100| 2015-07-09T14:09:05.885-0400 I WRITE [conn68] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" } ndeleted:13 keyUpdates:0 writeConflicts:0 numYields:1180 locks:{ Global: { acquireCount: { r: 1198, w: 1194 } }, Database: { acquireCount: { r: 2, w: 1194 } }, Collection: { acquireCount: { r: 2, w: 1181 } }, Metadata: { acquireCount: { w: 13 } }, oplog: { acquireCount: { w: 13 } } } 29336ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.886-0400 m31100| 2015-07-09T14:09:05.885-0400 I COMMAND [conn68] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1198, w: 1194 } }, Database: { acquireCount: { r: 2, w: 1194 } }, Collection: { acquireCount: { r: 2, w: 1181 } }, Metadata: { acquireCount: { w: 13 } }, oplog: { acquireCount: { w: 13 } } } protocol:op_command 29336ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.892-0400 m31100| 2015-07-09T14:09:05.892-0400 I QUERY [conn57] query db44.coll44 query: { $where: "this.tid === 6" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1338 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1143 nreturned:100 reslen:4620 locks:{ Global: { acquireCount: { r: 2292 } }, Database: { acquireCount: { r: 1146 } }, Collection: { acquireCount: { r: 1146 } } } 28835ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.896-0400 m31100| 2015-07-09T14:09:05.896-0400 I WRITE [conn147] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } ndeleted:10 keyUpdates:0 writeConflicts:0 numYields:1170 locks:{ Global: { acquireCount: { r: 1185, w: 1181 } }, Database: { acquireCount: { r: 2, w: 1181 } }, Collection: { acquireCount: { r: 2, w: 1171 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } 29366ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.897-0400 m31100| 2015-07-09T14:09:05.896-0400 I COMMAND [conn147] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1185, w: 1181 } }, Database: { acquireCount: { r: 2, w: 1181 } }, Collection: { acquireCount: { r: 2, w: 1171 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } protocol:op_command 29366ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.898-0400 m31100| 2015-07-09T14:09:05.898-0400 I WRITE [conn16] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" } ndeleted:9 keyUpdates:0 writeConflicts:0 numYields:1161 locks:{ Global: { acquireCount: { r: 1175, w: 1171 } }, Database: { acquireCount: { r: 2, w: 1171 } }, Collection: { acquireCount: { r: 2, w: 1162 } }, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } 29372ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.898-0400 m31100| 2015-07-09T14:09:05.898-0400 I COMMAND [conn16] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 4" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1175, w: 1171 } }, Database: { acquireCount: { r: 2, w: 1171 } }, Collection: { acquireCount: { r: 2, w: 1162 } }, Metadata: { acquireCount: { w: 9 } }, oplog: { acquireCount: { w: 9 } } } protocol:op_command 29372ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.899-0400 m31100| 2015-07-09T14:09:05.899-0400 I WRITE [conn69] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:22 keyUpdates:0 writeConflicts:0 numYields:1166 locks:{ Global: { acquireCount: { r: 1193, w: 1189 } }, Database: { acquireCount: { r: 2, w: 1189 } }, Collection: { acquireCount: { r: 2, w: 1167 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } 29284ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.900-0400 m31100| 2015-07-09T14:09:05.899-0400 I COMMAND [conn69] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1193, w: 1189 } }, Database: { acquireCount: { r: 2, w: 1189 } }, Collection: { acquireCount: { r: 2, w: 1167 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 29284ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:05.950-0400 m30998| 2015-07-09T14:09:05.950-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:05.948-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:06.855-0400 m31100| 2015-07-09T14:09:06.854-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:06.851-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:07.275-0400 m31200| 2015-07-09T14:09:07.275-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:07.272-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:16.588-0400 m31100| 2015-07-09T14:09:16.587-0400 I WRITE [conn23] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } ndeleted:10 keyUpdates:0 writeConflicts:0 numYields:820 locks:{ Global: { acquireCount: { r: 831, w: 831 } }, Database: { acquireCount: { w: 831 } }, Collection: { acquireCount: { w: 821 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } 20553ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:16.589-0400 m31100| 2015-07-09T14:09:16.587-0400 I COMMAND [conn23] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 831, w: 831 } }, Database: { acquireCount: { w: 831 } }, Collection: { acquireCount: { w: 821 } }, Metadata: { acquireCount: { w: 10 } }, oplog: { acquireCount: { w: 10 } } } protocol:op_command 20554ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:18.473-0400 m31100| 2015-07-09T14:09:18.472-0400 I QUERY [conn136] getmore db44.coll44 query: { $where: "this.tid === 1" } cursorid:2843793953393 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:511 nreturned:199 reslen:9174 locks:{ Global: { acquireCount: { r: 1024 } }, Database: { acquireCount: { r: 512 } }, Collection: { acquireCount: { r: 512 } } } 12631ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:26.741-0400 m31100| 2015-07-09T14:09:26.740-0400 I QUERY [conn46] query db44.coll44 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:2844657273379 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:926 keyUpdates:0 writeConflicts:0 numYields:846 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1694 } }, Database: { acquireCount: { r: 847 } }, Collection: { acquireCount: { r: 847 } } } 20835ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:27.654-0400 m31100| 2015-07-09T14:09:27.653-0400 I QUERY [conn57] query db44.coll44 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2844280102946 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:965 keyUpdates:0 writeConflicts:0 numYields:886 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1774 } }, Database: { acquireCount: { r: 887 } }, Collection: { acquireCount: { r: 887 } } } 21745ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:34.545-0400 m30999| 2015-07-09T14:09:34.545-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:34.542-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:35.953-0400 m30998| 2015-07-09T14:09:35.952-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:35.949-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:36.059-0400 m31100| 2015-07-09T14:09:36.059-0400 I QUERY [conn72] query db44.coll44 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2843394426325 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1330 keyUpdates:0 writeConflicts:0 numYields:1219 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2440 } }, Database: { acquireCount: { r: 1220 } }, Collection: { acquireCount: { r: 1220 } } } 30168ms
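All of the multi-second queries above share one cause: a $where predicate is opaque to the query planner, so every document is fetched (planSummary: COLLSCAN) and evaluated in JavaScript, and under ten concurrent FSM threads each operation yields constantly (numYields in the hundreds to thousands), stretching scans of a thousand-odd documents to tens of seconds. A sketch of the contrast, assuming the { tid: 1 } shard-key index the sharded collection must carry:

    // The workload's form: per-document JavaScript, index unusable, COLLSCAN.
    db.getSiblingDB("db44").coll44.find({ $where: "this.tid === 8" }).itcount();
    // An equivalent plain equality match is eligible for an index scan on { tid: 1 }.
    db.getSiblingDB("db44").coll44.find({ tid: 8 }).itcount();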
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:36.857-0400 m31100| 2015-07-09T14:09:36.856-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:36.854-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:37.278-0400 m31200| 2015-07-09T14:09:37.277-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:09:37.275-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:40.227-0400 m31100| 2015-07-09T14:09:40.226-0400 I QUERY [conn55] query db44.coll44 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:2844476020942 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1475 keyUpdates:0 writeConflicts:0 numYields:1393 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2790 } }, Database: { acquireCount: { r: 1395 } }, Collection: { acquireCount: { r: 1395 } } } 34273ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:41.266-0400 m31100| 2015-07-09T14:09:41.266-0400 I QUERY [conn73] query db44.coll44 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2844870269504 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1513 keyUpdates:0 writeConflicts:0 numYields:1420 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2844 } }, Database: { acquireCount: { r: 1422 } }, Collection: { acquireCount: { r: 1422 } } } 35309ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:41.291-0400 m31100| 2015-07-09T14:09:41.291-0400 I QUERY [conn151] query db44.coll44 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2843578281661 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1510 keyUpdates:0 writeConflicts:0 numYields:1422 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2850 } }, Database: { acquireCount: { r: 1425 } }, Collection: { acquireCount: { r: 1425 } } } 35299ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.857-0400 m31100| 2015-07-09T14:09:49.857-0400 I QUERY [conn143] getmore db44.coll44 query: { $where: "this.tid === 2" } cursorid:2844280102946 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:874 nreturned:77 reslen:3562 locks:{ Global: { acquireCount: { r: 1750 } }, Database: { acquireCount: { r: 875 } }, Collection: { acquireCount: { r: 875 } } } 22201ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.862-0400 m31100| 2015-07-09T14:09:49.861-0400 I QUERY [conn74] getmore db44.coll44 query: { $where: "this.tid === 9" } cursorid:2843394426325 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:533 nreturned:89 reslen:4114 locks:{ Global: { acquireCount: { r: 1068 } }, Database: { acquireCount: { r: 534 } }, Collection: { acquireCount: { r: 534 } } } 13800ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.862-0400 m31100| 2015-07-09T14:09:49.862-0400 I QUERY [conn136] getmore db44.coll44 query: { $where: "this.tid === 5" } cursorid:2844657273379 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:918 nreturned:83 reslen:3838 locks:{ Global: { acquireCount: { r: 1838 } }, Database: { acquireCount: { r: 919 } }, Collection: { acquireCount: { r: 919 } } } 23119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.866-0400 m31100| 2015-07-09T14:09:49.866-0400 I QUERY [conn138] getmore db44.coll44 query: { $where: "this.tid === 7" } cursorid:2844870269504 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:336 nreturned:89 reslen:4114 locks:{ Global: { acquireCount: { r: 674 } }, Database: { acquireCount: { r: 337 } }, Collection: { acquireCount: { r: 337 } } } 8597ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.890-0400 m31100| 2015-07-09T14:09:49.889-0400 I WRITE [conn68] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" } ndeleted:14 keyUpdates:0 writeConflicts:0 numYields:1760 locks:{ Global: { acquireCount: { r: 1775, w: 1775 } }, Database: { acquireCount: { w: 1775 } }, Collection: { acquireCount: { w: 1761 } }, Metadata: { acquireCount: { w: 14 } }, oplog: { acquireCount: { w: 14 } } } 44001ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.890-0400 m31100| 2015-07-09T14:09:49.889-0400 I COMMAND [conn68] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1775, w: 1775 } }, Database: { acquireCount: { w: 1775 } }, Collection: { acquireCount: { w: 1761 } }, Metadata: { acquireCount: { w: 14 } }, oplog: { acquireCount: { w: 14 } } } protocol:op_command 44002ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.894-0400 m31100| 2015-07-09T14:09:49.894-0400 I QUERY [conn150] getmore db44.coll44 query: { $where: "this.tid === 6" } cursorid:2844476020942 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:374 nreturned:99 reslen:4574 locks:{ Global: { acquireCount: { r: 750 } }, Database: { acquireCount: { r: 375 } }, Collection: { acquireCount: { r: 375 } } } 9664ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.899-0400 m31100| 2015-07-09T14:09:49.898-0400 I WRITE [conn133] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" } ndeleted:15 keyUpdates:0 writeConflicts:0 numYields:1755 locks:{ Global: { acquireCount: { r: 1771, w: 1771 } }, Database: { acquireCount: { w: 1771 } }, Collection: { acquireCount: { w: 1756 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } 44247ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.900-0400 m31100| 2015-07-09T14:09:49.898-0400 I COMMAND [conn133] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1771, w: 1771 } }, Database: { acquireCount: { w: 1771 } }, Collection: { acquireCount: { w: 1756 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 44247ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:09:49.902-0400 m31100| 2015-07-09T14:09:49.901-0400 I QUERY [conn86] getmore db44.coll44 query: { $where: "this.tid === 4" } cursorid:2843578281661 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:332 nreturned:90 reslen:4160 locks:{ Global: { acquireCount: { r: 666 } }, Database: { acquireCount: { r: 333 } }, Collection: { acquireCount: { r: 333 } } } 8608ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:04.548-0400 m30999| 2015-07-09T14:10:04.547-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:04.545-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:05.955-0400 m30998| 2015-07-09T14:10:05.954-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:05.952-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:06.859-0400 m31100| 2015-07-09T14:10:06.859-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:06.856-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:07.280-0400 m31200| 2015-07-09T14:10:07.279-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:07.277-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:09.611-0400 m31100| 2015-07-09T14:10:09.610-0400 I QUERY [conn60] query db44.coll44 query: { $where: "this.tid === 8" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2369 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2120 nreturned:90 reslen:4160 locks:{ Global: { acquireCount: { r: 4242 } }, Database: { acquireCount: { r: 2121 } }, Collection: { acquireCount: { r: 2121 } } } 53009ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:10.966-0400 m31100| 2015-07-09T14:10:10.966-0400 I QUERY [conn55] query db44.coll44 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2843466394698 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:936 keyUpdates:0 writeConflicts:0 numYields:861 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1724 } }, Database: { acquireCount: { r: 862 } }, Collection: { acquireCount: { r: 862 } } } 21061ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:13.937-0400 m31100| 2015-07-09T14:10:13.936-0400 I WRITE [conn147] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } ndeleted:37 keyUpdates:0 writeConflicts:0 numYields:2221 locks:{ Global: { acquireCount: { r: 2259, w: 2259 } }, Database: { acquireCount: { w: 2259 } }, Collection: { acquireCount: { w: 2222 } }, Metadata: { acquireCount: { w: 37 } }, oplog: { acquireCount: { w: 37 } } } 55431ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:13.937-0400 m31100| 2015-07-09T14:10:13.936-0400 I COMMAND [conn147] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2259, w: 2259 } }, Database: { acquireCount: { w: 2259 } }, Collection: { acquireCount: { w: 2222 } }, Metadata: { acquireCount: { w: 37 } }, oplog: { acquireCount: { w: 37 } } } protocol:op_command 55432ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:18.646-0400 m31100| 2015-07-09T14:10:18.646-0400 I QUERY [conn72] query db44.coll44 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2845137647637 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1270 keyUpdates:0 writeConflicts:0 numYields:1165 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2334 } }, Database: { acquireCount: { r: 1167 } }, Collection: { acquireCount: { r: 1167 } } } 28738ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:18.819-0400 m31100| 2015-07-09T14:10:18.818-0400 I QUERY [conn73] query db44.coll44 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2843625375458 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1282 keyUpdates:0 writeConflicts:0 numYields:1174 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2350 } }, Database: { acquireCount: { r: 1175 } }, Collection: { acquireCount: { r: 1175 } } } 28921ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:23.276-0400 m31100| 2015-07-09T14:10:23.276-0400 I QUERY [conn47] query db44.coll44 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2844099874353 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1465 keyUpdates:0 writeConflicts:0 numYields:1344 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2694 } }, Database: { acquireCount: { r: 1347 } }, Collection: { acquireCount: { r: 1347 } } } 33257ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:34.551-0400 m30999| 2015-07-09T14:10:34.550-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:34.547-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:35.958-0400 m30998| 2015-07-09T14:10:35.957-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:35.954-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:36.862-0400 m31100| 2015-07-09T14:10:36.862-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:36.859-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:37.282-0400 m31200| 2015-07-09T14:10:37.282-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:10:37.279-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:37.302-0400 m31100| 2015-07-09T14:10:37.301-0400 I QUERY [conn50] query db44.coll44 query: { $where: "this.tid === 1" } planSummary: COLLSCAN cursorid:2844700384245 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1024 keyUpdates:0 writeConflicts:0 numYields:946 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1894 } }, Database: { acquireCount: { r: 947 } }, Collection: { acquireCount: { r: 947 } } } 23355ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:44.960-0400 m31100| 2015-07-09T14:10:44.959-0400 I QUERY [conn86] getmore db44.coll44 query: { $where: "this.tid === 2" } cursorid:2843466394698 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1373 nreturned:177 reslen:8162 locks:{ Global: { acquireCount: { r: 2748 } }, Database: { acquireCount: { r: 1374 } }, Collection: { acquireCount: { r: 1374 } } } 33992ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:44.992-0400 m31100| 2015-07-09T14:10:44.992-0400 I QUERY [conn138] getmore db44.coll44 query: { $where: "this.tid === 3" } cursorid:2845137647637 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1060 nreturned:72 reslen:3332 locks:{ Global: { acquireCount: { r: 2122 } }, Database: { acquireCount: { r: 1061 } }, Collection: { acquireCount: { r: 1061 } } } 26344ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.002-0400 m31100| 2015-07-09T14:10:45.002-0400 I QUERY [conn151] query db44.coll44 query: { $where: "this.tid === 0" } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2432 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2227 nreturned:73 reslen:3378 locks:{ Global: { acquireCount: { r: 4456 } }, Database: { acquireCount: { r: 2228 } }, Collection: { acquireCount: { r: 2228 } } } 55099ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.008-0400 m31100| 2015-07-09T14:10:45.008-0400 I QUERY [conn150] getmore db44.coll44 query: { $where: "this.tid === 4" } cursorid:2844099874353 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:871 nreturned:390 reslen:17960 locks:{ Global: { acquireCount: { r: 1744 } }, Database: { acquireCount: { r: 872 } }, Collection: { acquireCount: { r: 872 } } } 21730ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.010-0400 m31100| 2015-07-09T14:10:45.010-0400 I WRITE [conn31] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } ndeleted:22 keyUpdates:0 writeConflicts:0 numYields:2218 locks:{ Global: { acquireCount: { r: 2241, w: 2241 } }, Database: { acquireCount: { w: 2241 } }, Collection: { acquireCount: { w: 2219 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } 55144ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.011-0400 m31100| 2015-07-09T14:10:45.010-0400 I COMMAND [conn31] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2241, w: 2241 } }, Database: { acquireCount: { w: 2241 } }, Collection: { acquireCount: { w: 2219 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 55144ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.017-0400 m31100| 2015-07-09T14:10:45.016-0400 I WRITE [conn67] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } ndeleted:28 keyUpdates:0 writeConflicts:0 numYields:2217 locks:{ Global: { acquireCount: { r: 2248, w: 2246 } }, Database: { acquireCount: { r: 1, w: 2246 } }, Collection: { acquireCount: { r: 1, w: 2218 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } 55110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.017-0400 m31100| 2015-07-09T14:10:45.016-0400 I COMMAND [conn67] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2248, w: 2246 } }, Database: { acquireCount: { r: 1, w: 2246 } }, Collection: { acquireCount: { r: 1, w: 2218 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } protocol:op_command 55111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.019-0400 m31100| 2015-07-09T14:10:45.018-0400 I QUERY [conn136] getmore db44.coll44 query: { $where: "this.tid === 9" } cursorid:2843625375458 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1055 nreturned:89 reslen:4114 locks:{ Global: { acquireCount: { r: 2112 } }, Database: { acquireCount: { r: 1056 } }, Collection: { acquireCount: { r: 1056 } } } 26197ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.034-0400 m31100| 2015-07-09T14:10:45.033-0400 I WRITE [conn30] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } ndeleted:26 keyUpdates:0 writeConflicts:0 numYields:2233 locks:{ Global: { acquireCount: { r: 2260, w: 2260 } }, Database: { acquireCount: { w: 2260 } }, Collection: { acquireCount: { w: 2234 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } 55137ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:10:45.034-0400 m31100| 2015-07-09T14:10:45.033-0400 I COMMAND [conn30] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2260, w: 2260 } }, Database: { acquireCount: { w: 2260 } }, Collection: { acquireCount: { w: 2234 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } protocol:op_command 55137ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:03.240-0400 m31100| 2015-07-09T14:11:03.239-0400 I QUERY [conn50] query db44.coll44 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:2844942281435 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:859 keyUpdates:0 writeConflicts:0 numYields:732 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1466 } }, Database: { acquireCount: { r: 733 } }, Collection: { acquireCount: { r: 733 } } } 18202ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:04.553-0400 m30999| 2015-07-09T14:11:04.552-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:04.550-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:05.961-0400 m30998| 2015-07-09T14:11:05.960-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:05.958-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:06.864-0400 m31100| 2015-07-09T14:11:06.864-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:06.861-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:07.285-0400 m31200| 2015-07-09T14:11:07.285-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:07.282-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:09.672-0400 m31100| 2015-07-09T14:11:09.671-0400 I WRITE [conn68] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } ndeleted:21 keyUpdates:0 writeConflicts:0 numYields:2427 locks:{ Global: { acquireCount: { r: 2449, w: 2449 } }, Database: { acquireCount: { w: 2449 } }, Collection: { acquireCount: { w: 2428 } }, Metadata: { acquireCount: { w: 21 } }, oplog: { acquireCount: { w: 21 } } } 60023ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:09.673-0400 m31100| 2015-07-09T14:11:09.671-0400 I COMMAND [conn68] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2449, w: 2449 } }, Database: { acquireCount: { w: 2449 } }, Collection: { acquireCount: { w: 2428 } }, Metadata: { acquireCount: { w: 21 } }, oplog: { acquireCount: { w: 21 } } } protocol:op_command 60023ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:10.016-0400 m31100| 2015-07-09T14:11:10.016-0400 I QUERY [conn46] query db44.coll44 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2844486631575 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1203 keyUpdates:0 writeConflicts:0 numYields:1014 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2032 } }, Database: { acquireCount: { r: 1016 } }, Collection: { acquireCount: { r: 1016 } } } 24941ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:13.779-0400 m31100| 2015-07-09T14:11:13.779-0400 I QUERY [conn151] query db44.coll44 query: { $where: "this.tid === 4" } planSummary: COLLSCAN cursorid:2843382509107 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1403 keyUpdates:0 writeConflicts:0 numYields:1156 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2314 } }, Database: { acquireCount: { r: 1157 } }, Collection: { acquireCount: { r: 1157 } } } 28717ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:14.704-0400 m31100| 2015-07-09T14:11:14.704-0400 I QUERY [conn42] getmore db44.coll44 query: { $where: "this.tid === 1" } cursorid:2844700384245 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1514 nreturned:262 reslen:12072 locks:{ Global: { acquireCount: { r: 3030 } }, Database: { acquireCount: { r: 1515 } }, Collection: { acquireCount: { r: 1515 } } } 37400ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:34.556-0400 m30999| 2015-07-09T14:11:34.555-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:34.553-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:35.963-0400 m30998| 2015-07-09T14:11:35.962-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:35.960-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:36.866-0400 m31100| 2015-07-09T14:11:36.866-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:36.863-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:37.287-0400 m31200| 2015-07-09T14:11:37.287-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:11:37.284-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.690-0400 m31100| 2015-07-09T14:11:40.690-0400 I QUERY [conn136] getmore db44.coll44 query: { $where: "this.tid === 5" } cursorid:2844942281435 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1467 nreturned:61 reslen:2826 locks:{ Global: { acquireCount: { r: 2936 } }, Database: { acquireCount: { r: 1468 } }, Collection: { acquireCount: { r: 1468 } } } 37448ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.706-0400 m31100| 2015-07-09T14:11:40.706-0400 I QUERY [conn150] getmore db44.coll44 query: { $where: "this.tid === 4" } cursorid:2843382509107 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1041 nreturned:490 reslen:22560 locks:{ Global: { acquireCount: { r: 2084 } }, Database: { acquireCount: { r: 1042 } }, Collection: { acquireCount: { r: 1042 } } } 26925ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.715-0400 m30999| 2015-07-09T14:11:40.715-0400 I NETWORK [conn281] end connection 127.0.0.1:63552 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.716-0400 m31100| 2015-07-09T14:11:40.715-0400 I QUERY [conn138] getmore db44.coll44 query: { $where: "this.tid === 9" } cursorid:2844486631575 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1185 nreturned:189 reslen:8714 locks:{ Global: { acquireCount: { r: 2372 } }, Database: { acquireCount: { r: 1186 } }, Collection: { acquireCount: { r: 1186 } } } 30697ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.718-0400 m31100| 2015-07-09T14:11:40.718-0400 I WRITE [conn69] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" } ndeleted:16 keyUpdates:0 writeConflicts:0 numYields:2201 locks:{ Global: { acquireCount: { r: 2218, w: 2218 } }, Database: { acquireCount: { w: 2218 } }, Collection: { acquireCount: { w: 2202 } }, Metadata: { acquireCount: { w: 16 } }, oplog: { acquireCount: { w: 16 } } } 55665ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.719-0400 m31100| 2015-07-09T14:11:40.718-0400 I COMMAND [conn69] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2218, w: 2218 } }, Database: { acquireCount: { w: 2218 } }, Collection: { acquireCount: { w: 2202 } }, Metadata: { acquireCount: { w: 16 } }, oplog: { acquireCount: { w: 16 } } } protocol:op_command 55665ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.728-0400 m31100| 2015-07-09T14:11:40.728-0400 I WRITE [conn23] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:30 keyUpdates:0 writeConflicts:0 numYields:2201 locks:{ Global: { acquireCount: { r: 2232, w: 2232 } }, Database: { acquireCount: { w: 2232 } }, Collection: { acquireCount: { w: 2202 } }, Metadata: { acquireCount: { w: 30 } }, oplog: { acquireCount: { w: 30 } } } 55764ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.729-0400 m31100| 2015-07-09T14:11:40.728-0400 I COMMAND [conn23] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2232, w: 2232 } }, Database: { acquireCount: { w: 2232 } }, Collection: { acquireCount: { w: 2202 } }, Metadata: { acquireCount: { w: 30 } }, oplog: { acquireCount: { w: 30 } } } protocol:op_command 55764ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.734-0400 m31100| 2015-07-09T14:11:40.733-0400 I WRITE [conn67] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" } ndeleted:30 keyUpdates:0 writeConflicts:0 numYields:2196 locks:{ Global: { acquireCount: { r: 2231, w: 2227 } }, Database: { acquireCount: { r: 2, w: 2227 } }, Collection: { acquireCount: { r: 2, w: 2197 } }, Metadata: { acquireCount: { w: 30 } }, oplog: { acquireCount: { w: 30 } } } 55658ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.734-0400 m31100| 2015-07-09T14:11:40.733-0400 I COMMAND [conn67] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 7" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2231, w: 2227 } }, Database: { acquireCount: { r: 2, w: 2227 } }, Collection: { acquireCount: { r: 2, w: 2197 } }, Metadata: { acquireCount: { w: 30 } }, oplog: { acquireCount: { w: 30 } } } protocol:op_command 55658ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.748-0400 m31100| 2015-07-09T14:11:40.747-0400 I WRITE [conn30] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } ndeleted:15 keyUpdates:0 writeConflicts:0 numYields:2197 locks:{ Global: { acquireCount: { r: 2213, w: 2213 } }, Database: { acquireCount: { w: 2213 } }, Collection: { acquireCount: { w: 2198 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } 55712ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.749-0400 m31100| 2015-07-09T14:11:40.748-0400 I COMMAND [conn30] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2213, w: 2213 } }, Database: { acquireCount: { w: 2213 } }, Collection: { acquireCount: { w: 2198 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 55712ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.761-0400 m31100| 2015-07-09T14:11:40.761-0400 I WRITE [conn25] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" } ndeleted:18 keyUpdates:0 writeConflicts:0 numYields:2197 locks:{ Global: { acquireCount: { r: 2216, w: 2216 } }, Database: { acquireCount: { w: 2216 } }, Collection: { acquireCount: { w: 2198 } }, Metadata: { acquireCount: { w: 18 } }, oplog: { acquireCount: { w: 18 } } } 55765ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:11:40.762-0400 m31100| 2015-07-09T14:11:40.761-0400 I COMMAND [conn25] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2216, w: 2216 } }, Database: { acquireCount: { w: 2216 } }, Collection: { acquireCount: { w: 2198 } }, Metadata: { acquireCount: { w: 18 } }, oplog: { acquireCount: { w: 18 } } } protocol:op_command 55765ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:01.927-0400 m31100| 2015-07-09T14:12:01.926-0400 I QUERY [conn73] query db44.coll44 query: { $where: "this.tid === 9" } planSummary: COLLSCAN cursorid:2845227407788 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1126 keyUpdates:0 writeConflicts:0 numYields:831 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1664 } }, Database: { acquireCount: { r: 832 } }, Collection: { acquireCount: { r: 832 } } } 21165ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:02.954-0400 m31100| 2015-07-09T14:12:02.953-0400 I WRITE [conn68] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" } ndeleted:26 keyUpdates:0 writeConflicts:0 numYields:2071 locks:{ Global: { acquireCount: { r: 2098, w: 2098 } }, Database: { acquireCount: { w: 2098 } }, Collection: { acquireCount: { w: 2072 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } 53254ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:02.955-0400 m31100| 2015-07-09T14:12:02.954-0400 I COMMAND [conn68] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 8" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2098, w: 2098 } }, Database: { acquireCount: { w: 2098 } }, Collection: { acquireCount: { w: 2072 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } protocol:op_command 53255ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:04.557-0400 m30999| 2015-07-09T14:12:04.557-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:04.555-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:05.968-0400 m30998| 2015-07-09T14:12:05.967-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:05.962-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:06.646-0400 m31100| 2015-07-09T14:12:06.645-0400 I QUERY [conn72] query db44.coll44 query: { $where: "this.tid === 7" } planSummary: COLLSCAN cursorid:2843906323741 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1373 keyUpdates:0 writeConflicts:0 numYields:1016 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2034 } }, Database: { acquireCount: { r: 1017 } }, Collection: { acquireCount: { r: 1017 } } } 25879ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:06.868-0400 m31100| 2015-07-09T14:12:06.868-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:06.865-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:07.290-0400 m31200| 2015-07-09T14:12:07.290-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:07.287-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:07.422-0400 m31100| 2015-07-09T14:12:07.421-0400 I WRITE [conn31] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" } ndeleted:40 keyUpdates:0 writeConflicts:0 numYields:2059 locks:{ Global: { acquireCount: { r: 2100, w: 2100 } }, Database: { acquireCount: { w: 2100 } }, Collection: { acquireCount: { w: 2060 } }, Metadata: { acquireCount: { w: 40 } }, oplog: { acquireCount: { w: 40 } } } 52713ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:07.422-0400 m31100| 2015-07-09T14:12:07.421-0400 I COMMAND [conn31] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2100, w: 2100 } }, Database: { acquireCount: { w: 2100 } }, Collection: { acquireCount: { w: 2060 } }, Metadata: { acquireCount: { w: 40 } }, oplog: { acquireCount: { w: 40 } } } protocol:op_command 52713ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:07.451-0400 m30998| 2015-07-09T14:12:07.451-0400 I NETWORK [conn279] end connection 127.0.0.1:63551 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:23.781-0400 m31100| 2015-07-09T14:12:23.781-0400 I QUERY [conn47] query db44.coll44 query: { $where: "this.tid === 0" } planSummary: COLLSCAN cursorid:2845369372786 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2220 keyUpdates:0 writeConflicts:0 numYields:1703 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 3408 } }, Database: { acquireCount: { r: 1704 } }, Collection: { acquireCount: { r: 1704 } } } 43016ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:34.561-0400 m30999| 2015-07-09T14:12:34.560-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:34.557-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:35.970-0400 m30998| 2015-07-09T14:12:35.970-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:35.967-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:36.883-0400 m31100| 2015-07-09T14:12:36.882-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:36.868-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.294-0400 m31200| 2015-07-09T14:12:37.293-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:12:37.290-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.628-0400 m31100| 2015-07-09T14:12:37.627-0400 I WRITE [conn147] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } ndeleted:14 keyUpdates:0 writeConflicts:0 numYields:2272 locks:{ Global: { acquireCount: { r: 2287, w: 2287 } }, Database: { acquireCount: { w: 2287 } }, Collection: { acquireCount: { w: 2273 } }, Metadata: { acquireCount: { w: 14 } }, oplog: { acquireCount: { w: 14 } } } 56934ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.629-0400 m31100| 2015-07-09T14:12:37.628-0400 I COMMAND [conn147] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2287, w: 2287 } }, Database: { acquireCount: { w: 2287 } }, Collection: { acquireCount: { w: 2273 } }, Metadata: { acquireCount: { w: 14 } }, oplog: { acquireCount: { w: 14 } } } protocol:op_command 56935ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.703-0400 m31100| 2015-07-09T14:12:37.703-0400 I WRITE [conn23] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:26 keyUpdates:0 writeConflicts:0 numYields:2290 locks:{ Global: { acquireCount: { r: 2317, w: 2317 } }, Database: { acquireCount: { w: 2317 } }, Collection: { acquireCount: { w: 2291 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } 56973ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.704-0400 m31100| 2015-07-09T14:12:37.703-0400 I COMMAND [conn23] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) &&
this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2317, w: 2317 } }, Database: { acquireCount: { w: 2317 } }, Collection: { acquireCount: { w: 2291 } }, Metadata: { acquireCount: { w: 26 } }, oplog: { acquireCount: { w: 26 } } } protocol:op_command 56973ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.708-0400 m31100| 2015-07-09T14:12:37.707-0400 I QUERY [conn150] getmore db44.coll44 query: { $where: "this.tid === 0" } cursorid:2845369372786 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:573 nreturned:56 reslen:2596 locks:{ Global: { acquireCount: { r: 1148 } }, Database: { acquireCount: { r: 574 } }, Collection: { acquireCount: { r: 574 } } } 13923ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.737-0400 m31100| 2015-07-09T14:12:37.737-0400 I QUERY [conn138] getmore db44.coll44 query: { $where: "this.tid === 9" } cursorid:2845227407788 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1460 nreturned:189 reslen:8714 locks:{ Global: { acquireCount: { r: 2922 } }, Database: { acquireCount: { r: 1461 } }, Collection: { acquireCount: { r: 1461 } } } 35808ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.739-0400 m31100| 2015-07-09T14:12:37.738-0400 I WRITE [conn25] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" } ndeleted:28 keyUpdates:0 writeConflicts:0 numYields:2273 locks:{ Global: { acquireCount: { r: 2306, w: 2302 } }, Database: { acquireCount: { r: 2, w: 2302 } }, Collection: { acquireCount: { r: 2, w: 2274 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } 56952ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.739-0400 m31100| 2015-07-09T14:12:37.738-0400 I COMMAND [conn25] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 3" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2306, w: 2302 } }, Database: { acquireCount: { r: 2, w: 2302 } }, Collection: { acquireCount: { r: 2, w: 2274 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } protocol:op_command 56952ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.759-0400 m31100| 2015-07-09T14:12:37.758-0400 I QUERY [conn55] query db44.coll44 query: { $where: "this.tid === 8" } planSummary: COLLSCAN cursorid:2843619546512 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:2067 keyUpdates:0 writeConflicts:0 numYields:1417 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 2836 } }, Database: { acquireCount: { r: 1418 } }, Collection: { acquireCount: { r: 1418 } } } 34796ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.759-0400 m31100| 2015-07-09T14:12:37.759-0400 I QUERY [conn136] getmore db44.coll44 query: { $where: "this.tid === 7" } cursorid:2843906323741 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1252 nreturned:231 reslen:10646 locks:{ Global: { acquireCount: { r: 2506 } }, Database: { acquireCount: { r: 1253 } }, Collection: { acquireCount: { r: 1253 } 
} } 31112ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.784-0400 m31100| 2015-07-09T14:12:37.784-0400 I WRITE [conn30] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } ndeleted:12 keyUpdates:0 writeConflicts:0 numYields:2297 locks:{ Global: { acquireCount: { r: 2310, w: 2310 } }, Database: { acquireCount: { w: 2310 } }, Collection: { acquireCount: { w: 2298 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } 57034ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.785-0400 m31100| 2015-07-09T14:12:37.784-0400 I COMMAND [conn30] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2310, w: 2310 } }, Database: { acquireCount: { w: 2310 } }, Collection: { acquireCount: { w: 2298 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } protocol:op_command 57034ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:37.798-0400 m30998| 2015-07-09T14:12:37.798-0400 I NETWORK [conn280] end connection 127.0.0.1:63553 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:53.621-0400 m31100| 2015-07-09T14:12:53.621-0400 I QUERY [conn151] query db44.coll44 query: { $where: "this.tid === 2" } planSummary: COLLSCAN cursorid:2844454643480 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:826 keyUpdates:0 writeConflicts:0 numYields:631 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1264 } }, Database: { acquireCount: { r: 632 } }, Collection: { acquireCount: { r: 632 } } } 15834ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:55.593-0400 m31100| 2015-07-09T14:12:55.592-0400 I QUERY [conn150] getmore db44.coll44 query: { $where: "this.tid === 8" } cursorid:2843619546512 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:717 nreturned:142 reslen:6552 locks:{ Global: { acquireCount: { r: 1436 } }, Database: { acquireCount: { r: 718 } }, Collection: { acquireCount: { r: 718 } } } 17830ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:55.623-0400 m30999| 2015-07-09T14:12:55.623-0400 I NETWORK [conn278] end connection 127.0.0.1:63548 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:12:58.575-0400 m31100| 2015-07-09T14:12:58.574-0400 I QUERY [conn46] query db44.coll44 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2844694977678 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1122 keyUpdates:0 writeConflicts:0 numYields:820 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1642 } }, Database: { acquireCount: { r: 821 } }, Collection: { acquireCount: { r: 821 } } } 20788ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:01.323-0400 m31100| 2015-07-09T14:13:01.323-0400 I QUERY [conn55] query db44.coll44 query: { $where: "this.tid === 6" } planSummary: COLLSCAN cursorid:2843521429213 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1299 keyUpdates:0 writeConflicts:0 numYields:919 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 1840 } }, Database: { acquireCount: { r: 920 } }, Collection: { acquireCount: { r: 920 } } } 23533ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:13:04.563-0400 m30999| 2015-07-09T14:13:04.563-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:04.560-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:05.973-0400 m30998| 2015-07-09T14:13:05.972-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:05.970-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:06.884-0400 m31100| 2015-07-09T14:13:06.884-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:06.881-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:07.296-0400 m31200| 2015-07-09T14:13:07.296-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:07.293-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:25.393-0400 m31100| 2015-07-09T14:13:25.392-0400 I WRITE [conn147] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" } ndeleted:15 keyUpdates:0 writeConflicts:0 numYields:1829 locks:{ Global: { acquireCount: { r: 1845, w: 1845 } }, Database: { acquireCount: { w: 1845 } }, Collection: { acquireCount: { w: 1830 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } 47762ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:25.393-0400 m31100| 2015-07-09T14:13:25.392-0400 I COMMAND [conn147] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 5" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1845, w: 1845 } }, Database: { acquireCount: { w: 1845 } }, Collection: { acquireCount: { w: 1830 } }, Metadata: { acquireCount: { w: 15 } }, oplog: { acquireCount: { w: 15 } } } protocol:op_command 47763ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.292-0400 m31100| 2015-07-09T14:13:29.292-0400 I QUERY [conn86] getmore db44.coll44 query: { $where: "this.tid === 2" } cursorid:2844454643480 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1362 nreturned:121 reslen:5586 locks:{ Global: { acquireCount: { r: 2726 } }, Database: { acquireCount: { r: 1363 } }, Collection: { acquireCount: { r: 1363 } } } 35669ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.293-0400 m31100| 2015-07-09T14:13:29.292-0400 I QUERY [conn150] getmore db44.coll44 query: { $where: "this.tid === 6" } cursorid:2843521429213 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1076 nreturned:46 reslen:2136 locks:{ Global: { acquireCount: { r: 2154 } }, Database: { acquireCount: { r: 1077 } }, Collection: { acquireCount: { r: 1077 } } } 27967ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.299-0400 m31100| 2015-07-09T14:13:29.298-0400 I 
QUERY [conn136] getmore db44.coll44 query: { $where: "this.tid === 3" } cursorid:2844694977678 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1181 nreturned:126 reslen:5816 locks:{ Global: { acquireCount: { r: 2364 } }, Database: { acquireCount: { r: 1182 } }, Collection: { acquireCount: { r: 1182 } } } 30721ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.333-0400 m31100| 2015-07-09T14:13:29.332-0400 I WRITE [conn23] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" } ndeleted:28 keyUpdates:0 writeConflicts:0 numYields:1984 locks:{ Global: { acquireCount: { r: 2013, w: 2013 } }, Database: { acquireCount: { w: 2013 } }, Collection: { acquireCount: { w: 1985 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } 51590ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.333-0400 m31100| 2015-07-09T14:13:29.332-0400 I COMMAND [conn23] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 0" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2013, w: 2013 } }, Database: { acquireCount: { w: 2013 } }, Collection: { acquireCount: { w: 1985 } }, Metadata: { acquireCount: { w: 28 } }, oplog: { acquireCount: { w: 28 } } } protocol:op_command 51590ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.338-0400 m31100| 2015-07-09T14:13:29.338-0400 I WRITE [conn25] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" } ndeleted:36 keyUpdates:0 writeConflicts:0 numYields:1998 locks:{ Global: { acquireCount: { r: 2035, w: 2035 } }, Database: { acquireCount: { w: 2035 } }, Collection: { acquireCount: { w: 1999 } }, Metadata: { acquireCount: { w: 36 } }, oplog: { acquireCount: { w: 36 } } } 51562ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.339-0400 m31100| 2015-07-09T14:13:29.338-0400 I COMMAND [conn25] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 9" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2035, w: 2035 } }, Database: { acquireCount: { w: 2035 } }, Collection: { acquireCount: { w: 1999 } }, Metadata: { acquireCount: { w: 36 } }, oplog: { acquireCount: { w: 36 } } } protocol:op_command 51562ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.343-0400 m30998| 2015-07-09T14:13:29.342-0400 I NETWORK [conn278] end connection 127.0.0.1:63547 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:29.374-0400 m30999| 2015-07-09T14:13:29.373-0400 I NETWORK [conn280] end connection 127.0.0.1:63550 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:33.755-0400 m31100| 2015-07-09T14:13:33.755-0400 I QUERY [conn46] query db44.coll44 query: { $where: "this.tid === 5" } planSummary: COLLSCAN cursorid:2845100587258 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:783 keyUpdates:0 writeConflicts:0 numYields:362 nreturned:101 reslen:4666 locks:{ Global: { 
acquireCount: { r: 726 } }, Database: { acquireCount: { r: 363 } }, Collection: { acquireCount: { r: 363 } } } 8354ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:34.566-0400 m30999| 2015-07-09T14:13:34.566-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:34.562-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:35.976-0400 m30998| 2015-07-09T14:13:35.975-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:35.972-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:36.891-0400 m31100| 2015-07-09T14:13:36.890-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:36.884-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:37.300-0400 m31200| 2015-07-09T14:13:37.299-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:13:37.296-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:39.281-0400 m31100| 2015-07-09T14:13:39.280-0400 I QUERY [conn50] query db44.coll44 query: { $where: "this.tid === 3" } planSummary: COLLSCAN cursorid:2845404667875 ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:1101 keyUpdates:0 writeConflicts:0 numYields:454 nreturned:101 reslen:4666 locks:{ Global: { acquireCount: { r: 910 } }, Database: { acquireCount: { r: 455 } }, Collection: { acquireCount: { r: 455 } } } 9970ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:56.136-0400 m31100| 2015-07-09T14:13:56.136-0400 I QUERY [conn136] getmore db44.coll44 query: { $where: "this.tid === 5" } cursorid:2845100587258 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1020 nreturned:32 reslen:1492 locks:{ Global: { acquireCount: { r: 2042 } }, Database: { acquireCount: { r: 1021 } }, Collection: { acquireCount: { r: 1021 } } } 22378ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:56.169-0400 m30998| 2015-07-09T14:13:56.169-0400 I NETWORK [conn281] end connection 127.0.0.1:63554 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.725-0400 m31100| 2015-07-09T14:13:58.724-0400 I WRITE [conn70] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" } ndeleted:18 keyUpdates:0 writeConflicts:0 numYields:1339 locks:{ Global: { acquireCount: { r: 1358, w: 1358 } }, Database: { acquireCount: { w: 1358 } }, Collection: { acquireCount: { w: 1340 } }, Metadata: { acquireCount: { w: 18 } }, oplog: { acquireCount: { w: 18 } } } 29429ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.725-0400 m31100| 2015-07-09T14:13:58.724-0400 I COMMAND [conn70] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 2" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 
numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1358, w: 1358 } }, Database: { acquireCount: { w: 1358 } }, Collection: { acquireCount: { w: 1340 } }, Metadata: { acquireCount: { w: 18 } }, oplog: { acquireCount: { w: 18 } } } protocol:op_command 29430ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.726-0400 m31100| 2015-07-09T14:13:58.725-0400 I QUERY [conn74] getmore db44.coll44 query: { $where: "this.tid === 3" } cursorid:2845404667875 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:886 nreturned:126 reslen:5816 locks:{ Global: { acquireCount: { r: 1774 } }, Database: { acquireCount: { r: 887 } }, Collection: { acquireCount: { r: 887 } } } 19442ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.730-0400 m30999| 2015-07-09T14:13:58.729-0400 I NETWORK [conn279] end connection 127.0.0.1:63549 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.731-0400 m30998| 2015-07-09T14:13:58.730-0400 I NETWORK [conn277] end connection 127.0.0.1:63545 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.736-0400 m31100| 2015-07-09T14:13:58.736-0400 I WRITE [conn30] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } ndeleted:18 keyUpdates:0 writeConflicts:0 numYields:1340 locks:{ Global: { acquireCount: { r: 1359, w: 1359 } }, Database: { acquireCount: { w: 1359 } }, Collection: { acquireCount: { w: 1341 } }, Metadata: { acquireCount: { w: 18 } }, oplog: { acquireCount: { w: 18 } } } 29441ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.737-0400 m31100| 2015-07-09T14:13:58.736-0400 I COMMAND [conn30] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1359, w: 1359 } }, Database: { acquireCount: { w: 1359 } }, Collection: { acquireCount: { w: 1341 } }, Metadata: { acquireCount: { w: 18 } }, oplog: { acquireCount: { w: 18 } } } protocol:op_command 29442ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.895-0400 m31100| 2015-07-09T14:13:58.894-0400 I WRITE [conn30] remove db44.coll44 query: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" } ndeleted:12 keyUpdates:0 writeConflicts:0 numYields:25 locks:{ Global: { acquireCount: { r: 38, w: 38 } }, Database: { acquireCount: { w: 38 } }, Collection: { acquireCount: { w: 26 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.896-0400 m31100| 2015-07-09T14:13:58.895-0400 I COMMAND [conn30] command db44.$cmd command: delete { delete: "coll44", deletes: [ { q: { $where: "this.x === Math.floor(Math.random() * 10) && this.tid === 6" }, limit: 0 } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559eb8a4ca4787b9985d1d47') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 38, w: 38 } }, Database: { acquireCount: { w: 38 } }, Collection: { acquireCount: { w: 26 } }, Metadata: { acquireCount: { w: 12 } }, oplog: { acquireCount: { w: 12 } } } protocol:op_command 156ms 
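The slow operations logged above all share one trait: a $where predicate carrying a JavaScript expression. $where cannot use an index, so each of these reads and removes is a full collection scan (planSummary: COLLSCAN) that evaluates the JS predicate against every scanned document, yielding thousands of times (numYields) under the ten concurrent threads (tids 0-9); that is why deletes of a few dozen documents take 29-57 seconds here. A minimal shell sketch of the op shape, reconstructed from the entries above (not the workload's source):

    // db44.coll44 and the predicate strings are verbatim from the log.
    var coll = db.getSiblingDB("db44").coll44;

    // Reader threads page over their own documents: a first batch of 101
    // (the "query" entries), then a getmore that exhausts the cursor.
    coll.find({$where: "this.tid === 7"}).itcount();

    // Writer threads delete a random tenth of their documents. The JS
    // predicate runs once per document scanned, so Math.random() is
    // re-rolled on every document examined, not once per remove.
    coll.remove({$where: "this.x === Math.floor(Math.random() * 10) && this.tid === 1"});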
[js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.901-0400 m30999| 2015-07-09T14:13:58.901-0400 I NETWORK [conn277] end connection 127.0.0.1:63546 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 jstests/concurrency/fsm_workloads/remove_where.js: Workload completed in 322719 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.930-0400 m30999| 2015-07-09T14:13:58.930-0400 I COMMAND [conn1] DROP: db44.coll44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.931-0400 m30999| 2015-07-09T14:13:58.930-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:58.930-0400-559eb9e6ca4787b9985d1d49", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465638930), what: "dropCollection.start", ns: "db44.coll44", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.991-0400 m30999| 2015-07-09T14:13:58.990-0400 I SHARDING [conn1] distributed lock 'db44.coll44/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9e6ca4787b9985d1d4a [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.992-0400 m31100| 2015-07-09T14:13:58.992-0400 I COMMAND [conn34] CMD: drop db44.coll44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.994-0400 m31200| 2015-07-09T14:13:58.994-0400 I COMMAND [conn84] CMD: drop db44.coll44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.996-0400 m31102| 2015-07-09T14:13:58.996-0400 I COMMAND [repl writer worker 12] CMD: drop db44.coll44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:58.996-0400 m31101| 2015-07-09T14:13:58.996-0400 I COMMAND [repl writer worker 13] CMD: drop db44.coll44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.050-0400 m31100| 2015-07-09T14:13:59.050-0400 I SHARDING [conn34] remotely refreshing metadata for db44.coll44 with requested shard version 0|0||000000000000000000000000, current shard version is 1|10||559eb8a4ca4787b9985d1d47, current metadata version is 1|10||559eb8a4ca4787b9985d1d47 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.052-0400 m31100| 2015-07-09T14:13:59.052-0400 W SHARDING [conn34] no chunks found when reloading db44.coll44, previous version was 0|0||559eb8a4ca4787b9985d1d47, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.052-0400 m31100| 2015-07-09T14:13:59.052-0400 I SHARDING [conn34] dropping metadata for db44.coll44 at shard version 1|10||559eb8a4ca4787b9985d1d47, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.054-0400 m30999| 2015-07-09T14:13:59.054-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:59.054-0400-559eb9e7ca4787b9985d1d4b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465639054), what: "dropCollection", ns: "db44.coll44", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.109-0400 m30999| 2015-07-09T14:13:59.108-0400 I SHARDING [conn1] distributed lock 'db44.coll44/bs-osx108-8:30999:1436464534:16807' unlocked. 
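The teardown above records the sharded-drop protocol from the mongos side: conn1 logs a dropCollection.start metadata event, takes the collection's distributed lock, sends the drop to the primary of each shard (the replica-set secondaries replay it via their repl writer workers), the donor shard notices "no chunks found when reloading ... this is a drop" and drops its sharding metadata, and the lock is released after the final dropCollection event. The dropDatabase entries just below repeat the same dance at the database level. A sketch of the shell calls that would produce this sequence (connection string hypothetical):

    var mongos = new Mongo("localhost:30999");   // hypothetical address
    mongos.getDB("db44").coll44.drop();          // "DROP: db44.coll44"
    mongos.getDB("db44").dropDatabase();         // "DROP DATABASE: db44"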
[js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.166-0400 m30999| 2015-07-09T14:13:59.165-0400 I COMMAND [conn1] DROP DATABASE: db44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.166-0400 m30999| 2015-07-09T14:13:59.165-0400 I SHARDING [conn1] DBConfig::dropDatabase: db44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.166-0400 m30999| 2015-07-09T14:13:59.165-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:59.165-0400-559eb9e7ca4787b9985d1d4c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465639165), what: "dropDatabase.start", ns: "db44", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.276-0400 m30999| 2015-07-09T14:13:59.275-0400 I SHARDING [conn1] DBConfig::dropDatabase: db44 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.276-0400 m31100| 2015-07-09T14:13:59.276-0400 I COMMAND [conn28] dropDatabase db44 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.276-0400 m31100| 2015-07-09T14:13:59.276-0400 I COMMAND [conn28] dropDatabase db44 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.277-0400 m30999| 2015-07-09T14:13:59.277-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:59.277-0400-559eb9e7ca4787b9985d1d4d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465639277), what: "dropDatabase", ns: "db44", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.278-0400 m31102| 2015-07-09T14:13:59.277-0400 I COMMAND [repl writer worker 0] dropDatabase db44 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.278-0400 m31102| 2015-07-09T14:13:59.277-0400 I COMMAND [repl writer worker 0] dropDatabase db44 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.278-0400 m31101| 2015-07-09T14:13:59.277-0400 I COMMAND [repl writer worker 3] dropDatabase db44 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.278-0400 m31101| 2015-07-09T14:13:59.277-0400 I COMMAND [repl writer worker 3] dropDatabase db44 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.380-0400 m31100| 2015-07-09T14:13:59.379-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.384-0400 m31101| 2015-07-09T14:13:59.383-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.384-0400 m31102| 2015-07-09T14:13:59.384-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.421-0400 m31200| 2015-07-09T14:13:59.421-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.422-0400 m31201| 2015-07-09T14:13:59.422-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.422-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.423-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.423-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.423-0400 jstests/concurrency/fsm_workloads/indexed_insert_2d.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.423-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.423-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.423-0400 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:13:59.425-0400 m31202| 2015-07-09T14:13:59.424-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.432-0400 m30999| 2015-07-09T14:13:59.431-0400 I SHARDING [conn1] distributed lock 'db45/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9e7ca4787b9985d1d4e [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.437-0400 m30999| 2015-07-09T14:13:59.436-0400 I SHARDING [conn1] Placing [db45] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.437-0400 m30999| 2015-07-09T14:13:59.436-0400 I SHARDING [conn1] Enabling sharding for database [db45] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.491-0400 m30999| 2015-07-09T14:13:59.491-0400 I SHARDING [conn1] distributed lock 'db45/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.517-0400 m31100| 2015-07-09T14:13:59.516-0400 I INDEX [conn30] build index on: db45.coll45 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.517-0400 m31100| 2015-07-09T14:13:59.516-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.533-0400 m31100| 2015-07-09T14:13:59.532-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.535-0400 m30999| 2015-07-09T14:13:59.534-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db45.coll45", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.538-0400 m30999| 2015-07-09T14:13:59.538-0400 I SHARDING [conn1] distributed lock 'db45.coll45/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9e7ca4787b9985d1d4f [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.539-0400 m30999| 2015-07-09T14:13:59.539-0400 I SHARDING [conn1] enable sharding on: db45.coll45 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.541-0400 m30999| 2015-07-09T14:13:59.539-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:59.539-0400-559eb9e7ca4787b9985d1d50", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465639539), what: "shardCollection.start", ns: "db45.coll45", details: { shardKey: { _id: "hashed" }, collection: "db45.coll45", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.542-0400 m31102| 2015-07-09T14:13:59.542-0400 I INDEX [repl writer worker 7] build index on: db45.coll45 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.543-0400 m31102| 2015-07-09T14:13:59.542-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.547-0400 m31101| 2015-07-09T14:13:59.547-0400 I INDEX [repl writer worker 2] build index on: db45.coll45 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.547-0400 m31101| 2015-07-09T14:13:59.547-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.548-0400 m31102| 2015-07-09T14:13:59.548-0400 I INDEX [repl writer worker 
7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.553-0400 m31101| 2015-07-09T14:13:59.553-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.593-0400 m30999| 2015-07-09T14:13:59.592-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db45.coll45 using new epoch 559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.700-0400 m30999| 2015-07-09T14:13:59.700-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db45.coll45: 1ms sequenceNumber: 195 version: 1|1||559eb9e7ca4787b9985d1d51 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.756-0400 m30999| 2015-07-09T14:13:59.755-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db45.coll45: 0ms sequenceNumber: 196 version: 1|1||559eb9e7ca4787b9985d1d51 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.758-0400 m31100| 2015-07-09T14:13:59.758-0400 I SHARDING [conn47] remotely refreshing metadata for db45.coll45 with requested shard version 1|1||559eb9e7ca4787b9985d1d51, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.760-0400 m31100| 2015-07-09T14:13:59.759-0400 I SHARDING [conn47] collection db45.coll45 was previously unsharded, new metadata loaded with shard version 1|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.760-0400 m31100| 2015-07-09T14:13:59.759-0400 I SHARDING [conn47] collection version was loaded at version 1|1||559eb9e7ca4787b9985d1d51, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.761-0400 m30999| 2015-07-09T14:13:59.760-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:59.760-0400-559eb9e7ca4787b9985d1d52", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465639760), what: "shardCollection", ns: "db45.coll45", details: { version: "1|1||559eb9e7ca4787b9985d1d51" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.815-0400 m30999| 2015-07-09T14:13:59.815-0400 I SHARDING [conn1] distributed lock 'db45.coll45/bs-osx108-8:30999:1436464534:16807' unlocked. 
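Setup for indexed_insert_2d.js, as recorded above: db45 is placed on test-rs0, sharding is enabled, and db45.coll45 is sharded on a hashed _id, which pre-creates two chunks split at hash value 0 (the "going to create 2 chunk(s)" entry). The moveChunk and splitChunk traffic below then spreads them out: the [0, MaxKey) chunk moves to test-rs1 with waitForDelete, and each shard splits its chunk again at +/-4611686018427387902, near the quarter points of the signed 64-bit hash range. A sketch of the equivalent mongos commands, reusing the hypothetical connection handle from the sketch above:

    var admin = mongos.getDB("admin");
    admin.runCommand({enableSharding: "db45"});
    admin.runCommand({shardCollection: "db45.coll45", key: {_id: "hashed"}});

    // Spread the two initial chunks across the shards; bounds (not find)
    // is the safe form with a hashed key, and _waitForDelete mirrors the
    // waitForDelete: true visible in the shard-side request below.
    admin.runCommand({moveChunk: "db45.coll45",
                      bounds: [{_id: NumberLong(0)}, {_id: MaxKey}],
                      to: "test-rs1",
                      _waitForDelete: true});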
[js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.816-0400 m30999| 2015-07-09T14:13:59.816-0400 I SHARDING [conn1] moving chunk ns: db45.coll45 moving ( ns: db45.coll45, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.817-0400 m31100| 2015-07-09T14:13:59.816-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.818-0400 m31100| 2015-07-09T14:13:59.818-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb9e7ca4787b9985d1d51') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.822-0400 m31100| 2015-07-09T14:13:59.821-0400 I SHARDING [conn34] distributed lock 'db45.coll45/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb9e7792e00bb672749e8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.823-0400 m31100| 2015-07-09T14:13:59.822-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:59.822-0400-559eb9e7792e00bb672749e9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465639822), what: "moveChunk.start", ns: "db45.coll45", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.876-0400 m31100| 2015-07-09T14:13:59.875-0400 I SHARDING [conn34] remotely refreshing metadata for db45.coll45 based on current shard version 1|1||559eb9e7ca4787b9985d1d51, current metadata version is 1|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.877-0400 m31100| 2015-07-09T14:13:59.877-0400 I SHARDING [conn34] metadata of collection db45.coll45 already up to date (shard version : 1|1||559eb9e7ca4787b9985d1d51, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.878-0400 m31100| 2015-07-09T14:13:59.877-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.878-0400 m31100| 2015-07-09T14:13:59.878-0400 I SHARDING [conn34] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.879-0400 m31200| 2015-07-09T14:13:59.878-0400 I SHARDING [conn16] remotely refreshing metadata for db45.coll45, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.881-0400 m31200| 2015-07-09T14:13:59.880-0400 I SHARDING [conn16] collection db45.coll45 was previously unsharded, new metadata loaded with shard version 0|0||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.881-0400 m31200| 2015-07-09T14:13:59.880-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb9e7ca4787b9985d1d51, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.881-0400 m31200| 2015-07-09T14:13:59.881-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db45.coll45 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.883-0400 m31100| 2015-07-09T14:13:59.883-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.887-0400 m31100| 2015-07-09T14:13:59.886-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.892-0400 m31100| 2015-07-09T14:13:59.891-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.901-0400 m31100| 2015-07-09T14:13:59.901-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.902-0400 m31200| 2015-07-09T14:13:59.901-0400 I INDEX [migrateThread] build index on: db45.coll45 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.902-0400 m31200| 2015-07-09T14:13:59.901-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.918-0400 m31200| 2015-07-09T14:13:59.918-0400 I INDEX [migrateThread] build index on: db45.coll45 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.919-0400 m31200| 2015-07-09T14:13:59.918-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.919-0400 m31100| 2015-07-09T14:13:59.919-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.928-0400 m31200| 2015-07-09T14:13:59.928-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.929-0400 m31200| 2015-07-09T14:13:59.929-0400 I SHARDING [migrateThread] Deleter starting delete for: db45.coll45 from { _id: 0 } -> { _id: MaxKey }, with opId: 73661 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.929-0400 m31200| 2015-07-09T14:13:59.929-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db45.coll45 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.937-0400 m31201| 2015-07-09T14:13:59.936-0400 I INDEX [repl writer worker 3] build index on: db45.coll45 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.938-0400 m31201| 2015-07-09T14:13:59.937-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.940-0400 m31202| 2015-07-09T14:13:59.939-0400 I INDEX [repl writer worker 9] build index on: db45.coll45 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.940-0400 m31202| 2015-07-09T14:13:59.939-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.941-0400 m31201| 2015-07-09T14:13:59.940-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.943-0400 m31200| 2015-07-09T14:13:59.942-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.943-0400 m31200| 2015-07-09T14:13:59.942-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db45.coll45' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.946-0400 m31202| 2015-07-09T14:13:59.946-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.953-0400 m31100| 2015-07-09T14:13:59.953-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.954-0400 m31100| 2015-07-09T14:13:59.953-0400 I SHARDING [conn34] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.954-0400 m31100| 2015-07-09T14:13:59.954-0400 I SHARDING [conn34] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.954-0400 m31100| 2015-07-09T14:13:59.954-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.966-0400 m31200| 2015-07-09T14:13:59.965-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db45.coll45' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:13:59.966-0400 m31200| 2015-07-09T14:13:59.965-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:13:59.965-0400-559eb9e7d5a107a5b9c0db3d", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465639965), what: "moveChunk.to", ns: "db45.coll45", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 47, step 2 of 5: 12, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 22, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.020-0400 m31100| 2015-07-09T14:14:00.019-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.020-0400 m31100| 2015-07-09T14:14:00.019-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559eb9e7ca4787b9985d1d51 through { _id: MinKey } -> { _id: 0 } for collection 'db45.coll45' [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.022-0400 m31100| 2015-07-09T14:14:00.021-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:00.021-0400-559eb9e8792e00bb672749ea", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465640021), what: "moveChunk.commit", ns: "db45.coll45", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.075-0400 m31100| 2015-07-09T14:14:00.075-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.075-0400 m31100| 2015-07-09T14:14:00.075-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.076-0400 m31100| 2015-07-09T14:14:00.075-0400 I SHARDING [conn34] Deleter starting delete for: db45.coll45 from { _id: 0 } -> { _id: MaxKey }, with opId: 81744 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:00.076-0400 m31100| 2015-07-09T14:14:00.075-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db45.coll45 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.076-0400 m31100| 2015-07-09T14:14:00.075-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.076-0400 m31100| 2015-07-09T14:14:00.076-0400 I SHARDING [conn34] distributed lock 'db45.coll45/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.077-0400 m31100| 2015-07-09T14:14:00.076-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:00.076-0400-559eb9e8792e00bb672749eb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465640076), what: "moveChunk.from", ns: "db45.coll45", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 4, step 4 of 6: 71, step 5 of 6: 122, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.131-0400 m31100| 2015-07-09T14:14:00.130-0400 I COMMAND [conn34] command db45.coll45 command: moveChunk { moveChunk: "db45.coll45", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb9e7ca4787b9985d1d51') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 313ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.133-0400 m30999| 2015-07-09T14:14:00.132-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db45.coll45: 0ms sequenceNumber: 197 version: 2|1||559eb9e7ca4787b9985d1d51 based on: 1|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.135-0400 m31100| 2015-07-09T14:14:00.134-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db45.coll45", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb9e7ca4787b9985d1d51') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.139-0400 m31100| 2015-07-09T14:14:00.138-0400 I SHARDING [conn34] distributed lock 'db45.coll45/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb9e8792e00bb672749ec [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.139-0400 m31100| 2015-07-09T14:14:00.139-0400 I SHARDING [conn34] remotely refreshing metadata for db45.coll45 based on current shard version 2|0||559eb9e7ca4787b9985d1d51, current metadata version is 2|0||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.141-0400 m31100| 2015-07-09T14:14:00.140-0400 I SHARDING [conn34] updating metadata for db45.coll45 from shard version 2|0||559eb9e7ca4787b9985d1d51 to shard version 2|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.141-0400 m31100| 2015-07-09T14:14:00.140-0400 I 
SHARDING [conn34] collection version was loaded at version 2|1||559eb9e7ca4787b9985d1d51, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.141-0400 m31100| 2015-07-09T14:14:00.140-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.142-0400 m31100| 2015-07-09T14:14:00.142-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:00.142-0400-559eb9e8792e00bb672749ed", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465640142), what: "split", ns: "db45.coll45", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb9e7ca4787b9985d1d51') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb9e7ca4787b9985d1d51') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.198-0400 m31100| 2015-07-09T14:14:00.198-0400 I SHARDING [conn34] distributed lock 'db45.coll45/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.201-0400 m30999| 2015-07-09T14:14:00.201-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db45.coll45: 1ms sequenceNumber: 198 version: 2|3||559eb9e7ca4787b9985d1d51 based on: 2|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.202-0400 m31200| 2015-07-09T14:14:00.201-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db45.coll45", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb9e7ca4787b9985d1d51') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.206-0400 m31200| 2015-07-09T14:14:00.206-0400 I SHARDING [conn84] distributed lock 'db45.coll45/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb9e8d5a107a5b9c0db3e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.206-0400 m31200| 2015-07-09T14:14:00.206-0400 I SHARDING [conn84] remotely refreshing metadata for db45.coll45 based on current shard version 0|0||559eb9e7ca4787b9985d1d51, current metadata version is 1|1||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.208-0400 m31200| 2015-07-09T14:14:00.207-0400 I SHARDING [conn84] updating metadata for db45.coll45 from shard version 0|0||559eb9e7ca4787b9985d1d51 to shard version 2|0||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.208-0400 m31200| 2015-07-09T14:14:00.208-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb9e7ca4787b9985d1d51, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.208-0400 m31200| 2015-07-09T14:14:00.208-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.210-0400 m31200| 2015-07-09T14:14:00.209-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:00.209-0400-559eb9e8d5a107a5b9c0db3f", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436465640209), what: "split", ns: "db45.coll45", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb9e7ca4787b9985d1d51') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb9e7ca4787b9985d1d51') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.265-0400 m31200| 2015-07-09T14:14:00.264-0400 I SHARDING [conn84] distributed lock 'db45.coll45/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.267-0400 m30999| 2015-07-09T14:14:00.266-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db45.coll45: 0ms sequenceNumber: 199 version: 2|5||559eb9e7ca4787b9985d1d51 based on: 2|3||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.274-0400 m31100| 2015-07-09T14:14:00.274-0400 I INDEX [conn47] build index on: db45.coll45 properties: { v: 1, key: { indexed_insert_2d: "2d" }, name: "indexed_insert_2d_2d", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.275-0400 m31100| 2015-07-09T14:14:00.274-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.275-0400 m31200| 2015-07-09T14:14:00.274-0400 I INDEX [conn83] build index on: db45.coll45 properties: { v: 1, key: { indexed_insert_2d: "2d" }, name: "indexed_insert_2d_2d", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.276-0400 m31200| 2015-07-09T14:14:00.274-0400 I INDEX [conn83] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.281-0400 m31100| 2015-07-09T14:14:00.281-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.284-0400 m31200| 2015-07-09T14:14:00.284-0400 I INDEX [conn83] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.285-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.369-0400 m31102| 2015-07-09T14:14:00.368-0400 I INDEX [repl writer worker 11] build index on: db45.coll45 properties: { v: 1, key: { indexed_insert_2d: "2d" }, name: "indexed_insert_2d_2d", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.369-0400 m31102| 2015-07-09T14:14:00.368-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.378-0400 m31101| 2015-07-09T14:14:00.377-0400 I INDEX [repl writer worker 11] build index on: db45.coll45 properties: { v: 1, key: { indexed_insert_2d: "2d" }, name: "indexed_insert_2d_2d", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.378-0400 m31101| 2015-07-09T14:14:00.377-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.387-0400 m31202| 2015-07-09T14:14:00.385-0400 I INDEX [repl writer worker 0] build index on: db45.coll45 properties: { v: 1, key: { indexed_insert_2d: "2d" }, name: "indexed_insert_2d_2d", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.388-0400 m31202| 2015-07-09T14:14:00.385-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.388-0400 m31201| 2015-07-09T14:14:00.386-0400 I INDEX [repl writer worker 14] build index on: db45.coll45 properties: { v: 1, key: { indexed_insert_2d: "2d" }, name: "indexed_insert_2d_2d", ns: "db45.coll45" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.388-0400 m31201| 2015-07-09T14:14:00.386-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.420-0400 m31102| 2015-07-09T14:14:00.418-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.434-0400 m31202| 2015-07-09T14:14:00.433-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.435-0400 m30999| 2015-07-09T14:14:00.434-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63593 #282 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.436-0400 m31101| 2015-07-09T14:14:00.435-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.480-0400 m30998| 2015-07-09T14:14:00.480-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63594 #282 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.490-0400 m30998| 2015-07-09T14:14:00.490-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63595 #283 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.501-0400 m31201| 2015-07-09T14:14:00.497-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.506-0400 m30998| 2015-07-09T14:14:00.505-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63596 #284 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.545-0400 m30999| 2015-07-09T14:14:00.543-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63597 #283 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.545-0400 m30999| 2015-07-09T14:14:00.544-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63598 #284 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.547-0400 m30998| 2015-07-09T14:14:00.547-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63599 #285 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.559-0400 m30998| 2015-07-09T14:14:00.558-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63600 #286 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.559-0400 m30999| 2015-07-09T14:14:00.558-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63601 #285 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.562-0400 m30999| 2015-07-09T14:14:00.562-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63603 #286 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.563-0400 m30999| 2015-07-09T14:14:00.563-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63604 #287 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.564-0400 m30998| 2015-07-09T14:14:00.564-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63602 #287 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.572-0400 m30999| 2015-07-09T14:14:00.567-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63605 #288 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.572-0400 m30998| 2015-07-09T14:14:00.567-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63606 #288 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.573-0400 m30998| 2015-07-09T14:14:00.573-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63609 #289 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.574-0400 m30999| 2015-07-09T14:14:00.573-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63607 #289 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.581-0400 m30999| 2015-07-09T14:14:00.580-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63608 #290 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.582-0400 m30999| 2015-07-09T14:14:00.582-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63611 #291 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.583-0400 m30998| 2015-07-09T14:14:00.583-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63610 #290 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.588-0400 m30998| 2015-07-09T14:14:00.583-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63612 #291 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.601-0400 setting random seed: 6830753767862 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.601-0400 setting 
random seed: 2333267633803 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.601-0400 setting random seed: 975942802615 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.601-0400 setting random seed: 8908643922768 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.601-0400 setting random seed: 2137298290617 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.602-0400 setting random seed: 645234393887 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.602-0400 setting random seed: 7197059313766 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.603-0400 setting random seed: 7110662735067 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.605-0400 setting random seed: 7242942657321 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.606-0400 setting random seed: 6149109434336 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.608-0400 setting random seed: 4478629929944 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.610-0400 m30998| 2015-07-09T14:14:00.608-0400 I SHARDING [conn284] ChunkManager: time to load chunks for db45.coll45: 0ms sequenceNumber: 57 version: 2|5||559eb9e7ca4787b9985d1d51 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.614-0400 setting random seed: 1572387139312 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.614-0400 setting random seed: 7578479689545 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.632-0400 setting random seed: 6306934324093 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.633-0400 setting random seed: 5229139588773 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.649-0400 setting random seed: 1508201509714 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.653-0400 setting random seed: 6161529761739 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.653-0400 setting random seed: 4454463110305 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.654-0400 setting random seed: 8342785565182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:00.654-0400 setting random seed: 2670691194944 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.151-0400 m30998| 2015-07-09T14:14:01.150-0400 I NETWORK [conn284] end connection 127.0.0.1:63596 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.335-0400 m30999| 2015-07-09T14:14:01.161-0400 I NETWORK [conn282] end connection 127.0.0.1:63593 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.335-0400 m30998| 2015-07-09T14:14:01.191-0400 I NETWORK [conn286] end connection 127.0.0.1:63600 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.335-0400 m30998| 2015-07-09T14:14:01.239-0400 I NETWORK [conn283] end connection 127.0.0.1:63595 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.336-0400 m30999| 2015-07-09T14:14:01.255-0400 I NETWORK [conn284] end connection 127.0.0.1:63598 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.336-0400 m30998| 2015-07-09T14:14:01.272-0400 I NETWORK [conn282] end connection 127.0.0.1:63594 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.336-0400 m30999| 2015-07-09T14:14:01.290-0400 I NETWORK [conn286] end connection 127.0.0.1:63603 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.336-0400 m30998| 2015-07-09T14:14:01.303-0400 I NETWORK [conn287] end connection 127.0.0.1:63602 (6 connections now 
open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.336-0400 m30999| 2015-07-09T14:14:01.310-0400 I NETWORK [conn283] end connection 127.0.0.1:63597 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.338-0400 m30999| 2015-07-09T14:14:01.338-0400 I NETWORK [conn289] end connection 127.0.0.1:63607 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.350-0400 m30999| 2015-07-09T14:14:01.349-0400 I NETWORK [conn285] end connection 127.0.0.1:63601 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.369-0400 m30999| 2015-07-09T14:14:01.368-0400 I NETWORK [conn287] end connection 127.0.0.1:63604 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.375-0400 m30998| 2015-07-09T14:14:01.375-0400 I NETWORK [conn289] end connection 127.0.0.1:63609 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.379-0400 m30998| 2015-07-09T14:14:01.379-0400 I NETWORK [conn285] end connection 127.0.0.1:63599 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.390-0400 m30998| 2015-07-09T14:14:01.390-0400 I NETWORK [conn288] end connection 127.0.0.1:63606 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.413-0400 m30999| 2015-07-09T14:14:01.413-0400 I NETWORK [conn290] end connection 127.0.0.1:63608 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.456-0400 m30998| 2015-07-09T14:14:01.456-0400 I NETWORK [conn290] end connection 127.0.0.1:63610 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.464-0400 m30999| 2015-07-09T14:14:01.464-0400 I NETWORK [conn291] end connection 127.0.0.1:63611 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.483-0400 m30998| 2015-07-09T14:14:01.482-0400 I NETWORK [conn291] end connection 127.0.0.1:63612 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.488-0400 m30999| 2015-07-09T14:14:01.488-0400 I NETWORK [conn288] end connection 127.0.0.1:63605 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.508-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.508-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.508-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.508-0400 jstests/concurrency/fsm_workloads/indexed_insert_2d.js: Workload completed in 1223 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.508-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.508-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.509-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.509-0400 m30999| 2015-07-09T14:14:01.508-0400 I COMMAND [conn1] DROP: db45.coll45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.509-0400 m30999| 2015-07-09T14:14:01.509-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:01.508-0400-559eb9e9ca4787b9985d1d53", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465641508), what: "dropCollection.start", ns: "db45.coll45", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.566-0400 m30999| 2015-07-09T14:14:01.565-0400 I SHARDING [conn1] distributed lock 'db45.coll45/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9e9ca4787b9985d1d54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.567-0400 m31100| 
2015-07-09T14:14:01.566-0400 I COMMAND [conn34] CMD: drop db45.coll45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.570-0400 m31200| 2015-07-09T14:14:01.569-0400 I COMMAND [conn84] CMD: drop db45.coll45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.571-0400 m31102| 2015-07-09T14:14:01.571-0400 I COMMAND [repl writer worker 11] CMD: drop db45.coll45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.572-0400 m31101| 2015-07-09T14:14:01.571-0400 I COMMAND [repl writer worker 1] CMD: drop db45.coll45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.574-0400 m31201| 2015-07-09T14:14:01.574-0400 I COMMAND [repl writer worker 3] CMD: drop db45.coll45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.574-0400 m31202| 2015-07-09T14:14:01.574-0400 I COMMAND [repl writer worker 12] CMD: drop db45.coll45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.627-0400 m31100| 2015-07-09T14:14:01.626-0400 I SHARDING [conn34] remotely refreshing metadata for db45.coll45 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb9e7ca4787b9985d1d51, current metadata version is 2|3||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.628-0400 m31100| 2015-07-09T14:14:01.627-0400 W SHARDING [conn34] no chunks found when reloading db45.coll45, previous version was 0|0||559eb9e7ca4787b9985d1d51, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.628-0400 m31100| 2015-07-09T14:14:01.628-0400 I SHARDING [conn34] dropping metadata for db45.coll45 at shard version 2|3||559eb9e7ca4787b9985d1d51, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.630-0400 m31200| 2015-07-09T14:14:01.629-0400 I SHARDING [conn84] remotely refreshing metadata for db45.coll45 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb9e7ca4787b9985d1d51, current metadata version is 2|5||559eb9e7ca4787b9985d1d51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.631-0400 m31200| 2015-07-09T14:14:01.631-0400 W SHARDING [conn84] no chunks found when reloading db45.coll45, previous version was 0|0||559eb9e7ca4787b9985d1d51, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.632-0400 m31200| 2015-07-09T14:14:01.631-0400 I SHARDING [conn84] dropping metadata for db45.coll45 at shard version 2|5||559eb9e7ca4787b9985d1d51, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.633-0400 m30999| 2015-07-09T14:14:01.632-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:01.632-0400-559eb9e9ca4787b9985d1d55", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465641632), what: "dropCollection", ns: "db45.coll45", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.687-0400 m30999| 2015-07-09T14:14:01.687-0400 I SHARDING [conn1] distributed lock 'db45.coll45/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.744-0400 m30999| 2015-07-09T14:14:01.743-0400 I COMMAND [conn1] DROP DATABASE: db45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.744-0400 m30999| 2015-07-09T14:14:01.743-0400 I SHARDING [conn1] DBConfig::dropDatabase: db45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.744-0400 m30999| 2015-07-09T14:14:01.743-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:01.743-0400-559eb9e9ca4787b9985d1d56", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465641743), what: "dropDatabase.start", ns: "db45", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.851-0400 m30999| 2015-07-09T14:14:01.850-0400 I SHARDING [conn1] DBConfig::dropDatabase: db45 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.852-0400 m31100| 2015-07-09T14:14:01.851-0400 I COMMAND [conn28] dropDatabase db45 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.852-0400 m31100| 2015-07-09T14:14:01.851-0400 I COMMAND [conn28] dropDatabase db45 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.853-0400 m30999| 2015-07-09T14:14:01.852-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:01.852-0400-559eb9e9ca4787b9985d1d57", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465641852), what: "dropDatabase", ns: "db45", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.853-0400 m31102| 2015-07-09T14:14:01.852-0400 I COMMAND [repl writer worker 9] dropDatabase db45 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.853-0400 m31102| 2015-07-09T14:14:01.853-0400 I COMMAND [repl writer worker 9] dropDatabase db45 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.853-0400 m31101| 2015-07-09T14:14:01.853-0400 I COMMAND [repl writer worker 8] dropDatabase db45 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.853-0400 m31101| 2015-07-09T14:14:01.853-0400 I COMMAND [repl writer worker 8] dropDatabase db45 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.947-0400 m31100| 2015-07-09T14:14:01.946-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.951-0400 m31102| 2015-07-09T14:14:01.951-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.951-0400 m31101| 2015-07-09T14:14:01.950-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.988-0400 m31200| 2015-07-09T14:14:01.987-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.991-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.991-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.991-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.991-0400 jstests/concurrency/fsm_workloads/create_capped_collection.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.991-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.992-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.992-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:01.992-0400 m31202| 2015-07-09T14:14:01.991-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:01.992-0400 m31201| 2015-07-09T14:14:01.991-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.000-0400 m30999| 2015-07-09T14:14:02.000-0400 I SHARDING [conn1] distributed lock 'db46/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9e9ca4787b9985d1d58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.004-0400 m30999| 2015-07-09T14:14:02.004-0400 I SHARDING [conn1] Placing [db46] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.005-0400 m30999| 2015-07-09T14:14:02.004-0400 I SHARDING [conn1] Enabling sharding for database [db46] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.059-0400 m30999| 2015-07-09T14:14:02.059-0400 I SHARDING [conn1] distributed lock 'db46/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.085-0400 m31100| 2015-07-09T14:14:02.082-0400 I INDEX [conn70] build index on: db46.coll46 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db46.coll46" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.086-0400 m31100| 2015-07-09T14:14:02.082-0400 I INDEX [conn70] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.091-0400 m31100| 2015-07-09T14:14:02.091-0400 I INDEX [conn70] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.093-0400 m30999| 2015-07-09T14:14:02.092-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db46.coll46", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.096-0400 m30999| 2015-07-09T14:14:02.096-0400 I SHARDING [conn1] distributed lock 'db46.coll46/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9eaca4787b9985d1d59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.097-0400 m30999| 2015-07-09T14:14:02.097-0400 I SHARDING [conn1] enable sharding on: db46.coll46 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.098-0400 m30999| 2015-07-09T14:14:02.097-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.097-0400-559eb9eaca4787b9985d1d5a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465642097), what: "shardCollection.start", ns: "db46.coll46", details: { shardKey: { _id: "hashed" }, collection: "db46.coll46", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.109-0400 m31102| 2015-07-09T14:14:02.109-0400 I INDEX [repl writer worker 8] build index on: db46.coll46 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db46.coll46" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.110-0400 m31102| 2015-07-09T14:14:02.109-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.118-0400 m31101| 2015-07-09T14:14:02.117-0400 I INDEX [repl writer worker 4] build index on: db46.coll46 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db46.coll46" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.118-0400 m31101| 2015-07-09T14:14:02.117-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.120-0400 m31102| 2015-07-09T14:14:02.119-0400 I INDEX [repl writer worker 
8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.127-0400 m31101| 2015-07-09T14:14:02.126-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.150-0400 m30999| 2015-07-09T14:14:02.149-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db46.coll46 using new epoch 559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.258-0400 m30999| 2015-07-09T14:14:02.258-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db46.coll46: 0ms sequenceNumber: 200 version: 1|1||559eb9eaca4787b9985d1d5b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.315-0400 m30999| 2015-07-09T14:14:02.314-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db46.coll46: 0ms sequenceNumber: 201 version: 1|1||559eb9eaca4787b9985d1d5b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.317-0400 m31100| 2015-07-09T14:14:02.316-0400 I SHARDING [conn51] remotely refreshing metadata for db46.coll46 with requested shard version 1|1||559eb9eaca4787b9985d1d5b, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.319-0400 m31100| 2015-07-09T14:14:02.318-0400 I SHARDING [conn51] collection db46.coll46 was previously unsharded, new metadata loaded with shard version 1|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.319-0400 m31100| 2015-07-09T14:14:02.318-0400 I SHARDING [conn51] collection version was loaded at version 1|1||559eb9eaca4787b9985d1d5b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.320-0400 m30999| 2015-07-09T14:14:02.319-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.319-0400-559eb9eaca4787b9985d1d5c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465642319), what: "shardCollection", ns: "db46.coll46", details: { version: "1|1||559eb9eaca4787b9985d1d5b" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.373-0400 m30999| 2015-07-09T14:14:02.373-0400 I SHARDING [conn1] distributed lock 'db46.coll46/bs-osx108-8:30999:1436464534:16807' unlocked. 
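The shardCollection sequence just logged is the harness's per-workload setup; a minimal shell sketch of the same two steps, assuming a 3.1-era shell connected to a mongos of this cluster (the sh.* helpers wrap the enableSharding/shardCollection admin commands seen in the log):

    // Minimal sketch. After this, mongos creates the initial chunks for
    // the hashed key, and the moveChunk/splitChunk sequence that follows
    // in the log distributes them across test-rs0 and test-rs1 at the
    // splitKeys shown above.
    sh.enableSharding("db46");
    sh.shardCollection("db46.coll46", { _id: "hashed" });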
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.374-0400 m30999| 2015-07-09T14:14:02.374-0400 I SHARDING [conn1] moving chunk ns: db46.coll46 moving ( ns: db46.coll46, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.375-0400 m31100| 2015-07-09T14:14:02.374-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.376-0400 m31100| 2015-07-09T14:14:02.375-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb9eaca4787b9985d1d5b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.380-0400 m31100| 2015-07-09T14:14:02.379-0400 I SHARDING [conn34] distributed lock 'db46.coll46/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb9ea792e00bb672749ef [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.380-0400 m31100| 2015-07-09T14:14:02.380-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.380-0400-559eb9ea792e00bb672749f0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465642380), what: "moveChunk.start", ns: "db46.coll46", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.433-0400 m31100| 2015-07-09T14:14:02.432-0400 I SHARDING [conn34] remotely refreshing metadata for db46.coll46 based on current shard version 1|1||559eb9eaca4787b9985d1d5b, current metadata version is 1|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.435-0400 m31100| 2015-07-09T14:14:02.435-0400 I SHARDING [conn34] metadata of collection db46.coll46 already up to date (shard version : 1|1||559eb9eaca4787b9985d1d5b, took 2ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.435-0400 m31100| 2015-07-09T14:14:02.435-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.436-0400 m31100| 2015-07-09T14:14:02.435-0400 I SHARDING [conn34] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.436-0400 m31200| 2015-07-09T14:14:02.436-0400 I SHARDING [conn16] remotely refreshing metadata for db46.coll46, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.438-0400 m31200| 2015-07-09T14:14:02.438-0400 I SHARDING [conn16] collection db46.coll46 was previously unsharded, new metadata loaded with shard version 0|0||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.438-0400 m31200| 2015-07-09T14:14:02.438-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb9eaca4787b9985d1d5b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.439-0400 m31200| 2015-07-09T14:14:02.438-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db46.coll46 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.441-0400 m31100| 2015-07-09T14:14:02.441-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.446-0400 m31100| 2015-07-09T14:14:02.445-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.451-0400 m31100| 2015-07-09T14:14:02.450-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.459-0400 m31200| 2015-07-09T14:14:02.458-0400 I INDEX [migrateThread] build index on: db46.coll46 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db46.coll46" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.460-0400 m31200| 2015-07-09T14:14:02.459-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.461-0400 m31100| 2015-07-09T14:14:02.460-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.470-0400 m31200| 2015-07-09T14:14:02.469-0400 I INDEX [migrateThread] build index on: db46.coll46 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db46.coll46" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.470-0400 m31200| 2015-07-09T14:14:02.469-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.478-0400 m31100| 2015-07-09T14:14:02.477-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.486-0400 m31200| 2015-07-09T14:14:02.486-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.487-0400 m31200| 2015-07-09T14:14:02.487-0400 I SHARDING [migrateThread] Deleter starting delete for: db46.coll46 from { _id: 0 } -> { _id: MaxKey }, with opId: 75255 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.489-0400 m31200| 2015-07-09T14:14:02.488-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db46.coll46 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.495-0400 m31201| 2015-07-09T14:14:02.495-0400 I INDEX [repl writer worker 14] build index on: db46.coll46 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db46.coll46" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.496-0400 m31201| 2015-07-09T14:14:02.495-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.496-0400 m31202| 2015-07-09T14:14:02.495-0400 I INDEX [repl writer worker 8] build index on: db46.coll46 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db46.coll46" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.496-0400 m31202| 2015-07-09T14:14:02.495-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.500-0400 m31201| 2015-07-09T14:14:02.499-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.503-0400 m31200| 2015-07-09T14:14:02.502-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.503-0400 m31200| 2015-07-09T14:14:02.502-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db46.coll46' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.503-0400 m31202| 2015-07-09T14:14:02.503-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.512-0400 m31100| 2015-07-09T14:14:02.511-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.512-0400 m31100| 2015-07-09T14:14:02.511-0400 I SHARDING [conn34] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.512-0400 m31100| 2015-07-09T14:14:02.512-0400 I SHARDING [conn34] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.513-0400 m31100| 2015-07-09T14:14:02.512-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.515-0400 m31200| 2015-07-09T14:14:02.515-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db46.coll46' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.516-0400 m31200| 2015-07-09T14:14:02.515-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.515-0400-559eb9ead5a107a5b9c0db40", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465642515), what: "moveChunk.to", ns: "db46.coll46", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 48, step 2 of 5: 14, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.569-0400 m31100| 2015-07-09T14:14:02.568-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.569-0400 m31100| 2015-07-09T14:14:02.568-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559eb9eaca4787b9985d1d5b through { _id: MinKey } -> { _id: 0 } for collection 'db46.coll46' [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.570-0400 m31100| 2015-07-09T14:14:02.570-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.570-0400-559eb9ea792e00bb672749f1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465642570), what: "moveChunk.commit", ns: "db46.coll46", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.624-0400 m31100| 2015-07-09T14:14:02.623-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.624-0400 m31100| 2015-07-09T14:14:02.624-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.624-0400 m31100| 2015-07-09T14:14:02.624-0400 I SHARDING [conn34] Deleter starting delete for: db46.coll46 from { _id: 0 } -> { _id: MaxKey }, with opId: 83362 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:02.624-0400 m31100| 2015-07-09T14:14:02.624-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db46.coll46 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.624-0400 m31100| 2015-07-09T14:14:02.624-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.625-0400 m31100| 2015-07-09T14:14:02.625-0400 I SHARDING [conn34] distributed lock 'db46.coll46/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.626-0400 m31100| 2015-07-09T14:14:02.625-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.625-0400-559eb9ea792e00bb672749f2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465642625), what: "moveChunk.from", ns: "db46.coll46", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 3, step 4 of 6: 72, step 5 of 6: 112, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.679-0400 m31100| 2015-07-09T14:14:02.678-0400 I COMMAND [conn34] command db46.coll46 command: moveChunk { moveChunk: "db46.coll46", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb9eaca4787b9985d1d5b') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 303ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.680-0400 m30999| 2015-07-09T14:14:02.680-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db46.coll46: 0ms sequenceNumber: 202 version: 2|1||559eb9eaca4787b9985d1d5b based on: 1|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.681-0400 m31100| 2015-07-09T14:14:02.681-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db46.coll46", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb9eaca4787b9985d1d5b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.684-0400 m31100| 2015-07-09T14:14:02.684-0400 I SHARDING [conn34] distributed lock 'db46.coll46/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb9ea792e00bb672749f3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.684-0400 m31100| 2015-07-09T14:14:02.684-0400 I SHARDING [conn34] remotely refreshing metadata for db46.coll46 based on current shard version 2|0||559eb9eaca4787b9985d1d5b, current metadata version is 2|0||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.686-0400 m31100| 2015-07-09T14:14:02.685-0400 I SHARDING [conn34] updating metadata for db46.coll46 from shard version 2|0||559eb9eaca4787b9985d1d5b to shard version 2|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.686-0400 m31100| 2015-07-09T14:14:02.685-0400 I 
SHARDING [conn34] collection version was loaded at version 2|1||559eb9eaca4787b9985d1d5b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.686-0400 m31100| 2015-07-09T14:14:02.686-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.687-0400 m31100| 2015-07-09T14:14:02.687-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.687-0400-559eb9ea792e00bb672749f4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465642687), what: "split", ns: "db46.coll46", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb9eaca4787b9985d1d5b') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb9eaca4787b9985d1d5b') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.742-0400 m31100| 2015-07-09T14:14:02.742-0400 I SHARDING [conn34] distributed lock 'db46.coll46/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.744-0400 m30999| 2015-07-09T14:14:02.744-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db46.coll46: 0ms sequenceNumber: 203 version: 2|3||559eb9eaca4787b9985d1d5b based on: 2|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.745-0400 m31200| 2015-07-09T14:14:02.744-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db46.coll46", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb9eaca4787b9985d1d5b') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.748-0400 m31200| 2015-07-09T14:14:02.747-0400 I SHARDING [conn84] distributed lock 'db46.coll46/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb9ead5a107a5b9c0db41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.748-0400 m31200| 2015-07-09T14:14:02.747-0400 I SHARDING [conn84] remotely refreshing metadata for db46.coll46 based on current shard version 0|0||559eb9eaca4787b9985d1d5b, current metadata version is 1|1||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.749-0400 m31200| 2015-07-09T14:14:02.749-0400 I SHARDING [conn84] updating metadata for db46.coll46 from shard version 0|0||559eb9eaca4787b9985d1d5b to shard version 2|0||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.749-0400 m31200| 2015-07-09T14:14:02.749-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb9eaca4787b9985d1d5b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.749-0400 m31200| 2015-07-09T14:14:02.749-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.751-0400 m31200| 2015-07-09T14:14:02.750-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:02.750-0400-559eb9ead5a107a5b9c0db42", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436465642750), what: "split", ns: "db46.coll46", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb9eaca4787b9985d1d5b') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb9eaca4787b9985d1d5b') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.805-0400 m31200| 2015-07-09T14:14:02.804-0400 I SHARDING [conn84] distributed lock 'db46.coll46/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.806-0400 m30999| 2015-07-09T14:14:02.806-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db46.coll46: 0ms sequenceNumber: 204 version: 2|5||559eb9eaca4787b9985d1d5b based on: 2|3||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.808-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.863-0400 m30998| 2015-07-09T14:14:02.863-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63615 #292 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.863-0400 m30999| 2015-07-09T14:14:02.863-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63616 #292 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.865-0400 m30999| 2015-07-09T14:14:02.864-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63617 #293 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.865-0400 m30999| 2015-07-09T14:14:02.865-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63618 #294 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.870-0400 m30998| 2015-07-09T14:14:02.870-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63619 #293 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.876-0400 setting random seed: 9401270733214 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.876-0400 setting random seed: 2517917985096 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.876-0400 setting random seed: 8647595220245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.877-0400 setting random seed: 8082311013713 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.877-0400 setting random seed: 7157286717556 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:02.879-0400 m30998| 2015-07-09T14:14:02.878-0400 I SHARDING [conn293] ChunkManager: time to load chunks for db46.coll46: 0ms sequenceNumber: 58 version: 2|5||559eb9eaca4787b9985d1d5b based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:04.567-0400 m30999| 2015-07-09T14:14:04.567-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:04.566-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:05.977-0400 m30998| 2015-07-09T14:14:05.976-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:05.975-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:06.891-0400 m31100| 2015-07-09T14:14:06.891-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:06.890-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms 
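Each of the 5 worker threads seeds the shell's PRNG before entering its FSM states, which is where the "setting random seed" lines above come from; a minimal sketch of that shell built-in (behavior as understood from the log, an assumption rather than test code):

    // Minimal sketch. Random.setRandomSeed() picks a seed when none is
    // passed and prints the "setting random seed: <n>" line seen above;
    // subsequent Random.rand() calls are deterministic for that seed.
    Random.setRandomSeed();
    var r = Random.rand();   // uniform value in [0, 1)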
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:07.300-0400 m31200| 2015-07-09T14:14:07.300-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:07.299-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.016-0400 m30998| 2015-07-09T14:14:17.016-0400 I NETWORK [conn293] end connection 127.0.0.1:63619 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.068-0400 m30999| 2015-07-09T14:14:17.068-0400 I NETWORK [conn293] end connection 127.0.0.1:63617 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.071-0400 m30999| 2015-07-09T14:14:17.070-0400 I NETWORK [conn292] end connection 127.0.0.1:63616 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.115-0400 m30998| 2015-07-09T14:14:17.114-0400 I NETWORK [conn292] end connection 127.0.0.1:63615 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.143-0400 m30999| 2015-07-09T14:14:17.142-0400 I NETWORK [conn294] end connection 127.0.0.1:63618 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.182-0400 m30999| 2015-07-09T14:14:17.181-0400 I COMMAND [conn1] DROP: db46.create_capped_collection0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.182-0400 m30999| 2015-07-09T14:14:17.181-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.182-0400 m31100| 2015-07-09T14:14:17.182-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.186-0400 m30999| 2015-07-09T14:14:17.186-0400 I COMMAND [conn1] DROP: db46.create_capped_collection0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.186-0400 m30999| 2015-07-09T14:14:17.186-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.186-0400 m31100| 2015-07-09T14:14:17.186-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.187-0400 m31102| 2015-07-09T14:14:17.186-0400 I COMMAND [repl writer worker 13] CMD: drop db46.create_capped_collection0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.187-0400 m30999| 2015-07-09T14:14:17.187-0400 I COMMAND [conn1] DROP: db46.create_capped_collection0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.187-0400 m30999| 2015-07-09T14:14:17.187-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.187-0400 m31100| 2015-07-09T14:14:17.187-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.188-0400 m31101| 2015-07-09T14:14:17.187-0400 I COMMAND [repl writer worker 1] CMD: drop db46.create_capped_collection0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.192-0400 m30999| 2015-07-09T14:14:17.192-0400 I COMMAND [conn1] DROP: db46.create_capped_collection0_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.193-0400 m30999| 2015-07-09T14:14:17.192-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.193-0400 m31100| 2015-07-09T14:14:17.192-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection0_3 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:17.193-0400 m31101| 2015-07-09T14:14:17.193-0400 I COMMAND [repl writer worker 12] CMD: drop db46.create_capped_collection0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.194-0400 m31102| 2015-07-09T14:14:17.194-0400 I COMMAND [repl writer worker 12] CMD: drop db46.create_capped_collection0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.195-0400 m30999| 2015-07-09T14:14:17.195-0400 I COMMAND [conn1] DROP: db46.create_capped_collection1_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.195-0400 m30999| 2015-07-09T14:14:17.195-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.195-0400 m31100| 2015-07-09T14:14:17.195-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection1_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.196-0400 m31101| 2015-07-09T14:14:17.195-0400 I COMMAND [repl writer worker 2] CMD: drop db46.create_capped_collection0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.197-0400 m31102| 2015-07-09T14:14:17.196-0400 I COMMAND [repl writer worker 6] CMD: drop db46.create_capped_collection0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.197-0400 m31101| 2015-07-09T14:14:17.197-0400 I COMMAND [repl writer worker 10] CMD: drop db46.create_capped_collection0_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.197-0400 m30999| 2015-07-09T14:14:17.197-0400 I COMMAND [conn1] DROP: db46.create_capped_collection1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.197-0400 m30999| 2015-07-09T14:14:17.197-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.197-0400 m31100| 2015-07-09T14:14:17.197-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.198-0400 m31102| 2015-07-09T14:14:17.198-0400 I COMMAND [repl writer worker 15] CMD: drop db46.create_capped_collection0_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.199-0400 m31101| 2015-07-09T14:14:17.199-0400 I COMMAND [repl writer worker 6] CMD: drop db46.create_capped_collection1_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.200-0400 m30999| 2015-07-09T14:14:17.200-0400 I COMMAND [conn1] DROP: db46.create_capped_collection1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.200-0400 m30999| 2015-07-09T14:14:17.200-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.200-0400 m31100| 2015-07-09T14:14:17.200-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.201-0400 m31102| 2015-07-09T14:14:17.200-0400 I COMMAND [repl writer worker 10] CMD: drop db46.create_capped_collection1_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.202-0400 m30999| 2015-07-09T14:14:17.202-0400 I COMMAND [conn1] DROP: db46.create_capped_collection1_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.203-0400 m30999| 2015-07-09T14:14:17.202-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.203-0400 m31102| 2015-07-09T14:14:17.202-0400 I COMMAND [repl writer worker 7] CMD: drop db46.create_capped_collection1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.203-0400 m31100| 2015-07-09T14:14:17.202-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection1_3 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:17.204-0400 m31101| 2015-07-09T14:14:17.202-0400 I COMMAND [repl writer worker 15] CMD: drop db46.create_capped_collection1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.204-0400 m31101| 2015-07-09T14:14:17.204-0400 I COMMAND [repl writer worker 9] CMD: drop db46.create_capped_collection1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.204-0400 m30999| 2015-07-09T14:14:17.204-0400 I COMMAND [conn1] DROP: db46.create_capped_collection2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.204-0400 m30999| 2015-07-09T14:14:17.204-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.205-0400 m31102| 2015-07-09T14:14:17.204-0400 I COMMAND [repl writer worker 4] CMD: drop db46.create_capped_collection1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.205-0400 m31100| 2015-07-09T14:14:17.205-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.206-0400 m31101| 2015-07-09T14:14:17.206-0400 I COMMAND [repl writer worker 14] CMD: drop db46.create_capped_collection1_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.207-0400 m30999| 2015-07-09T14:14:17.206-0400 I COMMAND [conn1] DROP: db46.create_capped_collection2_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.207-0400 m30999| 2015-07-09T14:14:17.206-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.207-0400 m31100| 2015-07-09T14:14:17.206-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection2_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.207-0400 m31102| 2015-07-09T14:14:17.207-0400 I COMMAND [repl writer worker 1] CMD: drop db46.create_capped_collection1_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.208-0400 m31101| 2015-07-09T14:14:17.208-0400 I COMMAND [repl writer worker 13] CMD: drop db46.create_capped_collection2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.209-0400 m30999| 2015-07-09T14:14:17.209-0400 I COMMAND [conn1] DROP: db46.create_capped_collection2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.210-0400 m30999| 2015-07-09T14:14:17.209-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.210-0400 m31102| 2015-07-09T14:14:17.209-0400 I COMMAND [repl writer worker 11] CMD: drop db46.create_capped_collection2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.210-0400 m31100| 2015-07-09T14:14:17.209-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.211-0400 m31101| 2015-07-09T14:14:17.210-0400 I COMMAND [repl writer worker 5] CMD: drop db46.create_capped_collection2_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.212-0400 m30999| 2015-07-09T14:14:17.211-0400 I COMMAND [conn1] DROP: db46.create_capped_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.212-0400 m30999| 2015-07-09T14:14:17.212-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.212-0400 m31100| 2015-07-09T14:14:17.212-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.212-0400 m31102| 2015-07-09T14:14:17.212-0400 I COMMAND [repl writer worker 3] CMD: drop db46.create_capped_collection2_1 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:17.214-0400 m31101| 2015-07-09T14:14:17.213-0400 I COMMAND [repl writer worker 7] CMD: drop db46.create_capped_collection2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.214-0400 m30999| 2015-07-09T14:14:17.214-0400 I COMMAND [conn1] DROP: db46.create_capped_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.214-0400 m30999| 2015-07-09T14:14:17.214-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.215-0400 m31102| 2015-07-09T14:14:17.214-0400 I COMMAND [repl writer worker 8] CMD: drop db46.create_capped_collection2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.215-0400 m31100| 2015-07-09T14:14:17.214-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.216-0400 m30999| 2015-07-09T14:14:17.216-0400 I COMMAND [conn1] DROP: db46.create_capped_collection3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.216-0400 m30999| 2015-07-09T14:14:17.216-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.217-0400 m31100| 2015-07-09T14:14:17.216-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.217-0400 m31101| 2015-07-09T14:14:17.217-0400 I COMMAND [repl writer worker 11] CMD: drop db46.create_capped_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.217-0400 m31102| 2015-07-09T14:14:17.217-0400 I COMMAND [repl writer worker 9] CMD: drop db46.create_capped_collection2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.218-0400 m30999| 2015-07-09T14:14:17.217-0400 I COMMAND [conn1] DROP: db46.create_capped_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.218-0400 m30999| 2015-07-09T14:14:17.218-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.218-0400 m31100| 2015-07-09T14:14:17.218-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.219-0400 m31102| 2015-07-09T14:14:17.219-0400 I COMMAND [repl writer worker 14] CMD: drop db46.create_capped_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.220-0400 m30999| 2015-07-09T14:14:17.220-0400 I COMMAND [conn1] DROP: db46.create_capped_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.221-0400 m30999| 2015-07-09T14:14:17.220-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.221-0400 m31101| 2015-07-09T14:14:17.220-0400 I COMMAND [repl writer worker 8] CMD: drop db46.create_capped_collection3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.221-0400 m31100| 2015-07-09T14:14:17.220-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.222-0400 m31101| 2015-07-09T14:14:17.221-0400 I COMMAND [repl writer worker 3] CMD: drop db46.create_capped_collection3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.222-0400 m31102| 2015-07-09T14:14:17.222-0400 I COMMAND [repl writer worker 2] CMD: drop db46.create_capped_collection3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.224-0400 m30999| 2015-07-09T14:14:17.223-0400 I COMMAND [conn1] DROP: db46.create_capped_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.224-0400 m30999| 
2015-07-09T14:14:17.223-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.224-0400 m31101| 2015-07-09T14:14:17.223-0400 I COMMAND [repl writer worker 4] CMD: drop db46.create_capped_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.224-0400 m31100| 2015-07-09T14:14:17.223-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.225-0400 m31102| 2015-07-09T14:14:17.224-0400 I COMMAND [repl writer worker 0] CMD: drop db46.create_capped_collection3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.225-0400 m31101| 2015-07-09T14:14:17.225-0400 I COMMAND [repl writer worker 0] CMD: drop db46.create_capped_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.226-0400 m30999| 2015-07-09T14:14:17.225-0400 I COMMAND [conn1] DROP: db46.create_capped_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.226-0400 m30999| 2015-07-09T14:14:17.225-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.226-0400 m31102| 2015-07-09T14:14:17.225-0400 I COMMAND [repl writer worker 5] CMD: drop db46.create_capped_collection3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.226-0400 m31100| 2015-07-09T14:14:17.225-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.227-0400 m31101| 2015-07-09T14:14:17.226-0400 I COMMAND [repl writer worker 1] CMD: drop db46.create_capped_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.227-0400 m30999| 2015-07-09T14:14:17.227-0400 I COMMAND [conn1] DROP: db46.create_capped_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.228-0400 m30999| 2015-07-09T14:14:17.227-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.228-0400 m31100| 2015-07-09T14:14:17.227-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.228-0400 m31102| 2015-07-09T14:14:17.228-0400 I COMMAND [repl writer worker 13] CMD: drop db46.create_capped_collection4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.229-0400 m31101| 2015-07-09T14:14:17.228-0400 I COMMAND [repl writer worker 12] CMD: drop db46.create_capped_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.229-0400 m31102| 2015-07-09T14:14:17.229-0400 I COMMAND [repl writer worker 12] CMD: drop db46.create_capped_collection4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.230-0400 m30999| 2015-07-09T14:14:17.229-0400 I COMMAND [conn1] DROP: db46.create_capped_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.230-0400 m30999| 2015-07-09T14:14:17.229-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.230-0400 m31100| 2015-07-09T14:14:17.229-0400 I COMMAND [conn57] CMD: drop db46.create_capped_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.231-0400 m31102| 2015-07-09T14:14:17.231-0400 I COMMAND [repl writer worker 6] CMD: drop db46.create_capped_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.231-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.231-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.231-0400 ---- [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:17.232-0400 jstests/concurrency/fsm_workloads/create_capped_collection.js: Workload completed in 14372 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.232-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.232-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.232-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.232-0400 m30999| 2015-07-09T14:14:17.231-0400 I COMMAND [conn1] DROP: db46.coll46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.233-0400 m30999| 2015-07-09T14:14:17.231-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:17.231-0400-559eb9f9ca4787b9985d1d5d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465657231), what: "dropCollection.start", ns: "db46.coll46", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.233-0400 m31101| 2015-07-09T14:14:17.231-0400 I COMMAND [repl writer worker 2] CMD: drop db46.create_capped_collection4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.233-0400 m31101| 2015-07-09T14:14:17.233-0400 I COMMAND [repl writer worker 10] CMD: drop db46.create_capped_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.234-0400 m31102| 2015-07-09T14:14:17.234-0400 I COMMAND [repl writer worker 15] CMD: drop db46.create_capped_collection4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.293-0400 m30999| 2015-07-09T14:14:17.292-0400 I SHARDING [conn1] distributed lock 'db46.coll46/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9f9ca4787b9985d1d5e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.294-0400 m31100| 2015-07-09T14:14:17.293-0400 I COMMAND [conn34] CMD: drop db46.coll46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.298-0400 m31200| 2015-07-09T14:14:17.297-0400 I COMMAND [conn84] CMD: drop db46.coll46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.299-0400 m31102| 2015-07-09T14:14:17.298-0400 I COMMAND [repl writer worker 10] CMD: drop db46.coll46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.299-0400 m31101| 2015-07-09T14:14:17.299-0400 I COMMAND [repl writer worker 6] CMD: drop db46.coll46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.302-0400 m31202| 2015-07-09T14:14:17.302-0400 I COMMAND [repl writer worker 3] CMD: drop db46.coll46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.302-0400 m31201| 2015-07-09T14:14:17.302-0400 I COMMAND [repl writer worker 6] CMD: drop db46.coll46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.355-0400 m31100| 2015-07-09T14:14:17.355-0400 I SHARDING [conn34] remotely refreshing metadata for db46.coll46 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb9eaca4787b9985d1d5b, current metadata version is 2|3||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.357-0400 m31100| 2015-07-09T14:14:17.356-0400 W SHARDING [conn34] no chunks found when reloading db46.coll46, previous version was 0|0||559eb9eaca4787b9985d1d5b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.357-0400 m31100| 2015-07-09T14:14:17.357-0400 I SHARDING [conn34] dropping metadata for db46.coll46 at shard version 2|3||559eb9eaca4787b9985d1d5b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.359-0400 m31200| 2015-07-09T14:14:17.358-0400 I SHARDING [conn84] remotely refreshing metadata for db46.coll46 with requested shard version 
0|0||000000000000000000000000, current shard version is 2|5||559eb9eaca4787b9985d1d5b, current metadata version is 2|5||559eb9eaca4787b9985d1d5b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.360-0400 m31200| 2015-07-09T14:14:17.360-0400 W SHARDING [conn84] no chunks found when reloading db46.coll46, previous version was 0|0||559eb9eaca4787b9985d1d5b, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.360-0400 m31200| 2015-07-09T14:14:17.360-0400 I SHARDING [conn84] dropping metadata for db46.coll46 at shard version 2|5||559eb9eaca4787b9985d1d5b, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.361-0400 m30999| 2015-07-09T14:14:17.361-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:17.361-0400-559eb9f9ca4787b9985d1d5f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465657361), what: "dropCollection", ns: "db46.coll46", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.416-0400 m30999| 2015-07-09T14:14:17.415-0400 I SHARDING [conn1] distributed lock 'db46.coll46/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.472-0400 m30999| 2015-07-09T14:14:17.471-0400 I COMMAND [conn1] DROP DATABASE: db46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.472-0400 m30999| 2015-07-09T14:14:17.471-0400 I SHARDING [conn1] DBConfig::dropDatabase: db46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.472-0400 m30999| 2015-07-09T14:14:17.471-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:17.471-0400-559eb9f9ca4787b9985d1d60", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465657471), what: "dropDatabase.start", ns: "db46", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.577-0400 m30999| 2015-07-09T14:14:17.577-0400 I SHARDING [conn1] DBConfig::dropDatabase: db46 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.578-0400 m31100| 2015-07-09T14:14:17.577-0400 I COMMAND [conn28] dropDatabase db46 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.578-0400 m31100| 2015-07-09T14:14:17.578-0400 I COMMAND [conn28] dropDatabase db46 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.579-0400 m30999| 2015-07-09T14:14:17.578-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:17.578-0400-559eb9f9ca4787b9985d1d61", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465657578), what: "dropDatabase", ns: "db46", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.579-0400 m31102| 2015-07-09T14:14:17.579-0400 I COMMAND [repl writer worker 7] dropDatabase db46 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.580-0400 m31102| 2015-07-09T14:14:17.579-0400 I COMMAND [repl writer worker 7] dropDatabase db46 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.580-0400 m31101| 2015-07-09T14:14:17.579-0400 I COMMAND [repl writer worker 15] dropDatabase db46 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.580-0400 m31101| 2015-07-09T14:14:17.579-0400 I COMMAND [repl writer worker 15] dropDatabase db46 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.673-0400 m31100| 2015-07-09T14:14:17.673-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.677-0400 m31101| 
2015-07-09T14:14:17.677-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.677-0400 m31102| 2015-07-09T14:14:17.677-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.716-0400 m31200| 2015-07-09T14:14:17.715-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.718-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.719-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.719-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.719-0400 jstests/concurrency/fsm_workloads/yield_fetch.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.719-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.719-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.719-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.719-0400 m31201| 2015-07-09T14:14:17.719-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.720-0400 m31202| 2015-07-09T14:14:17.719-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.726-0400 m30999| 2015-07-09T14:14:17.725-0400 I SHARDING [conn1] distributed lock 'db47/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9f9ca4787b9985d1d62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.729-0400 m30999| 2015-07-09T14:14:17.729-0400 I SHARDING [conn1] Placing [db47] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.729-0400 m30999| 2015-07-09T14:14:17.729-0400 I SHARDING [conn1] Enabling sharding for database [db47] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.784-0400 m30999| 2015-07-09T14:14:17.784-0400 I SHARDING [conn1] distributed lock 'db47/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.811-0400 m31100| 2015-07-09T14:14:17.810-0400 I INDEX [conn70] build index on: db47.coll47 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.811-0400 m31100| 2015-07-09T14:14:17.810-0400 I INDEX [conn70] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.819-0400 m31100| 2015-07-09T14:14:17.817-0400 I INDEX [conn70] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.820-0400 m30999| 2015-07-09T14:14:17.819-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db47.coll47", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.823-0400 m30999| 2015-07-09T14:14:17.822-0400 I SHARDING [conn1] distributed lock 'db47.coll47/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eb9f9ca4787b9985d1d63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.824-0400 m30999| 2015-07-09T14:14:17.823-0400 I SHARDING [conn1] enable sharding on: db47.coll47 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.824-0400 m30999| 2015-07-09T14:14:17.823-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:17.823-0400-559eb9f9ca4787b9985d1d64", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465657823), what: "shardCollection.start", ns: "db47.coll47", details: { shardKey: { _id: "hashed" }, collection: "db47.coll47", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.831-0400 m31101| 2015-07-09T14:14:17.830-0400 I INDEX [repl writer worker 7] build index on: db47.coll47 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.832-0400 m31101| 2015-07-09T14:14:17.830-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.836-0400 m31101| 2015-07-09T14:14:17.835-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.837-0400 m31102| 2015-07-09T14:14:17.837-0400 I INDEX [repl writer worker 8] build index on: db47.coll47 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.837-0400 m31102| 2015-07-09T14:14:17.837-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.843-0400 m31102| 2015-07-09T14:14:17.843-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.879-0400 m30999| 2015-07-09T14:14:17.879-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db47.coll47 using new epoch 559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.940-0400 m30999| 2015-07-09T14:14:17.939-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db47.coll47: 0ms sequenceNumber: 205 version: 1|1||559eb9f9ca4787b9985d1d65 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.995-0400 m30999| 2015-07-09T14:14:17.994-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db47.coll47: 0ms sequenceNumber: 206 version: 1|1||559eb9f9ca4787b9985d1d65 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.997-0400 m31100| 2015-07-09T14:14:17.996-0400 I SHARDING [conn57] remotely refreshing metadata for db47.coll47 with requested shard version 1|1||559eb9f9ca4787b9985d1d65, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.998-0400 m31100| 2015-07-09T14:14:17.998-0400 I SHARDING [conn57] collection db47.coll47 was previously unsharded, new metadata loaded with shard version 1|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.999-0400 m31100| 2015-07-09T14:14:17.998-0400 I SHARDING [conn57] collection version was loaded at version 1|1||559eb9f9ca4787b9985d1d65, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:17.999-0400 m30999| 2015-07-09T14:14:17.998-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:17.998-0400-559eb9f9ca4787b9985d1d66", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465657998), what: "shardCollection", ns: "db47.coll47", details: { version: "1|1||559eb9f9ca4787b9985d1d65" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.053-0400 m30999| 2015-07-09T14:14:18.052-0400 I SHARDING [conn1] distributed lock 'db47.coll47/bs-osx108-8:30999:1436464534:16807' unlocked. 
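The entries above show mongos placing db47 on test-rs0, enabling sharding for the database, and sharding db47.coll47 on a hashed _id key with two initial chunks, just after the shards build the { _id: "hashed" } index. As a rough illustration only (not the harness's actual code, and the mongos address below is hypothetical), the same setup could be driven from a mongo shell like this:

    // Hedged sketch of the sharding setup logged above; "localhost:30999" is
    // a hypothetical mongos address, not taken from this log.
    var mongos = new Mongo("localhost:30999");
    var admin = mongos.getDB("admin");
    admin.runCommand({ enableSharding: "db47" });
    admin.runCommand({ shardCollection: "db47.coll47", key: { _id: "hashed" } });

Hashed sharding spreads _id values pseudo-randomly across the 64-bit hash range, which is why the split points that appear later in this log land near the quarter-points of that range.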
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.054-0400 m30999| 2015-07-09T14:14:18.053-0400 I SHARDING [conn1] moving chunk ns: db47.coll47 moving ( ns: db47.coll47, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.054-0400 m31100| 2015-07-09T14:14:18.054-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.055-0400 m31100| 2015-07-09T14:14:18.055-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb9f9ca4787b9985d1d65') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.059-0400 m31100| 2015-07-09T14:14:18.059-0400 I SHARDING [conn34] distributed lock 'db47.coll47/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb9fa792e00bb672749f6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.059-0400 m31100| 2015-07-09T14:14:18.059-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:18.059-0400-559eb9fa792e00bb672749f7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465658059), what: "moveChunk.start", ns: "db47.coll47", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.112-0400 m31100| 2015-07-09T14:14:18.112-0400 I SHARDING [conn34] remotely refreshing metadata for db47.coll47 based on current shard version 1|1||559eb9f9ca4787b9985d1d65, current metadata version is 1|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.114-0400 m31100| 2015-07-09T14:14:18.113-0400 I SHARDING [conn34] metadata of collection db47.coll47 already up to date (shard version : 1|1||559eb9f9ca4787b9985d1d65, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.114-0400 m31100| 2015-07-09T14:14:18.113-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.114-0400 m31100| 2015-07-09T14:14:18.114-0400 I SHARDING [conn34] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.115-0400 m31200| 2015-07-09T14:14:18.114-0400 I SHARDING [conn16] remotely refreshing metadata for db47.coll47, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.116-0400 m31200| 2015-07-09T14:14:18.116-0400 I SHARDING [conn16] collection db47.coll47 was previously unsharded, new metadata loaded with shard version 0|0||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.117-0400 m31200| 2015-07-09T14:14:18.116-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eb9f9ca4787b9985d1d65, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.117-0400 m31200| 2015-07-09T14:14:18.116-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db47.coll47 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.118-0400 m31100| 2015-07-09T14:14:18.118-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.122-0400 m31100| 2015-07-09T14:14:18.121-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.126-0400 m31100| 2015-07-09T14:14:18.126-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.129-0400 m31200| 2015-07-09T14:14:18.129-0400 I INDEX [migrateThread] build index on: db47.coll47 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.130-0400 m31200| 2015-07-09T14:14:18.129-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.137-0400 m31100| 2015-07-09T14:14:18.136-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.140-0400 m31200| 2015-07-09T14:14:18.140-0400 I INDEX [migrateThread] build index on: db47.coll47 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.140-0400 m31200| 2015-07-09T14:14:18.140-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.153-0400 m31200| 2015-07-09T14:14:18.152-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.153-0400 m31200| 2015-07-09T14:14:18.153-0400 I SHARDING [migrateThread] Deleter starting delete for: db47.coll47 from { _id: 0 } -> { _id: MaxKey }, with opId: 75348 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.154-0400 m31100| 2015-07-09T14:14:18.153-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.154-0400 m31200| 2015-07-09T14:14:18.154-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db47.coll47 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.161-0400 m31202| 2015-07-09T14:14:18.160-0400 I INDEX [repl writer worker 2] build index on: db47.coll47 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.162-0400 m31202| 2015-07-09T14:14:18.160-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.163-0400 m31201| 2015-07-09T14:14:18.163-0400 I INDEX [repl writer worker 11] build index on: db47.coll47 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.163-0400 m31201| 2015-07-09T14:14:18.163-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.166-0400 m31202| 2015-07-09T14:14:18.165-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.167-0400 m31200| 2015-07-09T14:14:18.166-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.168-0400 m31200| 2015-07-09T14:14:18.166-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db47.coll47' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.170-0400 m31201| 2015-07-09T14:14:18.170-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.188-0400 m31100| 2015-07-09T14:14:18.187-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.188-0400 m31100| 2015-07-09T14:14:18.187-0400 I SHARDING [conn34] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.189-0400 m31100| 2015-07-09T14:14:18.188-0400 I SHARDING [conn34] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.189-0400 m31100| 2015-07-09T14:14:18.189-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.191-0400 m31200| 2015-07-09T14:14:18.191-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db47.coll47' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.191-0400 m31200| 2015-07-09T14:14:18.191-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:18.191-0400-559eb9fad5a107a5b9c0db43", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465658191), what: "moveChunk.to", ns: "db47.coll47", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 36, step 2 of 5: 12, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.245-0400 m31100| 2015-07-09T14:14:18.244-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.245-0400 m31100| 2015-07-09T14:14:18.245-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559eb9f9ca4787b9985d1d65 through { _id: MinKey } -> { _id: 0 } for collection 'db47.coll47' [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.247-0400 m31100| 2015-07-09T14:14:18.246-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:18.246-0400-559eb9fa792e00bb672749f8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465658246), what: "moveChunk.commit", ns: "db47.coll47", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.300-0400 m31100| 2015-07-09T14:14:18.300-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.300-0400 m31100| 2015-07-09T14:14:18.300-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.301-0400 m31100| 2015-07-09T14:14:18.300-0400 I SHARDING [conn34] Deleter starting delete for: db47.coll47 from { _id: 0 } -> { _id: MaxKey }, with opId: 118395 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:18.301-0400 m31100| 2015-07-09T14:14:18.300-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db47.coll47 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.301-0400 m31100| 2015-07-09T14:14:18.300-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.302-0400 m31100| 2015-07-09T14:14:18.301-0400 I SHARDING [conn34] distributed lock 'db47.coll47/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.302-0400 m31100| 2015-07-09T14:14:18.301-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:18.301-0400-559eb9fa792e00bb672749f9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465658301), what: "moveChunk.from", ns: "db47.coll47", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 70, step 5 of 6: 112, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.357-0400 m31100| 2015-07-09T14:14:18.356-0400 I COMMAND [conn34] command db47.coll47 command: moveChunk { moveChunk: "db47.coll47", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eb9f9ca4787b9985d1d65') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 301ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.358-0400 m30999| 2015-07-09T14:14:18.358-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db47.coll47: 0ms sequenceNumber: 207 version: 2|1||559eb9f9ca4787b9985d1d65 based on: 1|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.360-0400 m31100| 2015-07-09T14:14:18.359-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db47.coll47", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb9f9ca4787b9985d1d65') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.363-0400 m31100| 2015-07-09T14:14:18.363-0400 I SHARDING [conn34] distributed lock 'db47.coll47/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eb9fa792e00bb672749fa [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.364-0400 m31100| 2015-07-09T14:14:18.363-0400 I SHARDING [conn34] remotely refreshing metadata for db47.coll47 based on current shard version 2|0||559eb9f9ca4787b9985d1d65, current metadata version is 2|0||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.365-0400 m31100| 2015-07-09T14:14:18.365-0400 I SHARDING [conn34] updating metadata for db47.coll47 from shard version 2|0||559eb9f9ca4787b9985d1d65 to shard version 2|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.365-0400 m31100| 2015-07-09T14:14:18.365-0400 I 
SHARDING [conn34] collection version was loaded at version 2|1||559eb9f9ca4787b9985d1d65, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.365-0400 m31100| 2015-07-09T14:14:18.365-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.366-0400 m31100| 2015-07-09T14:14:18.366-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:18.366-0400-559eb9fa792e00bb672749fb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465658366), what: "split", ns: "db47.coll47", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eb9f9ca4787b9985d1d65') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eb9f9ca4787b9985d1d65') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.421-0400 m31100| 2015-07-09T14:14:18.420-0400 I SHARDING [conn34] distributed lock 'db47.coll47/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.422-0400 m30999| 2015-07-09T14:14:18.422-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db47.coll47: 0ms sequenceNumber: 208 version: 2|3||559eb9f9ca4787b9985d1d65 based on: 2|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.423-0400 m31200| 2015-07-09T14:14:18.422-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db47.coll47", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eb9f9ca4787b9985d1d65') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.428-0400 m31200| 2015-07-09T14:14:18.427-0400 I SHARDING [conn84] distributed lock 'db47.coll47/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eb9fad5a107a5b9c0db44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.428-0400 m31200| 2015-07-09T14:14:18.427-0400 I SHARDING [conn84] remotely refreshing metadata for db47.coll47 based on current shard version 0|0||559eb9f9ca4787b9985d1d65, current metadata version is 1|1||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.429-0400 m31200| 2015-07-09T14:14:18.428-0400 I SHARDING [conn84] updating metadata for db47.coll47 from shard version 0|0||559eb9f9ca4787b9985d1d65 to shard version 2|0||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.429-0400 m31200| 2015-07-09T14:14:18.428-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eb9f9ca4787b9985d1d65, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.429-0400 m31200| 2015-07-09T14:14:18.428-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.430-0400 m31200| 2015-07-09T14:14:18.429-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:18.429-0400-559eb9fad5a107a5b9c0db45", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436465658429), what: "split", ns: "db47.coll47", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eb9f9ca4787b9985d1d65') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eb9f9ca4787b9985d1d65') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.485-0400 m31200| 2015-07-09T14:14:18.484-0400 I SHARDING [conn84] distributed lock 'db47.coll47/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.487-0400 m30999| 2015-07-09T14:14:18.487-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db47.coll47: 1ms sequenceNumber: 209 version: 2|5||559eb9f9ca4787b9985d1d65 based on: 2|3||559eb9f9ca4787b9985d1d65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.582-0400 m31200| 2015-07-09T14:14:18.581-0400 I INDEX [conn35] build index on: db47.coll47 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.583-0400 m31200| 2015-07-09T14:14:18.582-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.584-0400 m31100| 2015-07-09T14:14:18.583-0400 I INDEX [conn57] build index on: db47.coll47 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.584-0400 m31100| 2015-07-09T14:14:18.583-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.590-0400 m31200| 2015-07-09T14:14:18.589-0400 I INDEX [conn35] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.592-0400 m31100| 2015-07-09T14:14:18.591-0400 I INDEX [conn57] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.600-0400 m31202| 2015-07-09T14:14:18.599-0400 I INDEX [repl writer worker 13] build index on: db47.coll47 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.601-0400 m31200| 2015-07-09T14:14:18.599-0400 I INDEX [conn35] build index on: db47.coll47 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.601-0400 m31200| 2015-07-09T14:14:18.600-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.601-0400 m31202| 2015-07-09T14:14:18.600-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.602-0400 m31100| 2015-07-09T14:14:18.601-0400 I INDEX [conn57] build index on: db47.coll47 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.602-0400 m31100| 2015-07-09T14:14:18.601-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.602-0400 m31102| 2015-07-09T14:14:18.601-0400 I INDEX [repl writer worker 8] build index on: db47.coll47 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.602-0400 m31102| 2015-07-09T14:14:18.601-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.602-0400 m31201| 2015-07-09T14:14:18.601-0400 I INDEX [repl writer worker 11] build index on: db47.coll47 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db47.coll47" } 
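The migration above follows the usual donor/recipient protocol: test-rs1 starts a migrateThread, the donor polls data-transfer progress from "ready" to "steady", enters its critical section, gets the commit accepted by the TO-shard, and bumps the collection version to 2|0; each shard then splits its remaining chunk at ±4611686018427387902 (roughly ±2^62, the quarter-points of the signed 64-bit hashed-key range). The { c: 1 } and { d: 1 } index builds interleaved here are the next workload's setup (yield_fetch.js). A hedged shell sketch of the same move and splits, using the standard sh helpers with the key values copied from the log:

    // Hedged sketch: hand-issuing the chunk operations logged above.
    // sh.moveChunk(ns, query, toShard) moves the chunk containing the key;
    // sh.splitAt(ns, middleKey) splits the containing chunk at that key.
    sh.moveChunk("db47.coll47", { _id: 0 }, "test-rs1");
    sh.splitAt("db47.coll47", { _id: NumberLong("-4611686018427387902") });
    sh.splitAt("db47.coll47", { _id: NumberLong("4611686018427387902") });
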
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.603-0400 m31201| 2015-07-09T14:14:18.601-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.610-0400 m31101| 2015-07-09T14:14:18.609-0400 I INDEX [repl writer worker 2] build index on: db47.coll47 properties: { v: 1, key: { c: 1.0 }, name: "c_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.610-0400 m31101| 2015-07-09T14:14:18.609-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.610-0400 m31102| 2015-07-09T14:14:18.609-0400 I INDEX [repl writer worker 8] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.612-0400 m31200| 2015-07-09T14:14:18.611-0400 I INDEX [conn35] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.614-0400 m31202| 2015-07-09T14:14:18.614-0400 I INDEX [repl writer worker 13] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.621-0400 m31100| 2015-07-09T14:14:18.621-0400 I INDEX [conn57] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.622-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.625-0400 m31202| 2015-07-09T14:14:18.625-0400 I INDEX [repl writer worker 2] build index on: db47.coll47 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.625-0400 m31202| 2015-07-09T14:14:18.625-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.626-0400 m31101| 2015-07-09T14:14:18.624-0400 I INDEX [repl writer worker 2] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.626-0400 m31201| 2015-07-09T14:14:18.624-0400 I INDEX [repl writer worker 11] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.652-0400 m31102| 2015-07-09T14:14:18.652-0400 I INDEX [repl writer worker 15] build index on: db47.coll47 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.653-0400 m31102| 2015-07-09T14:14:18.652-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.665-0400 m31201| 2015-07-09T14:14:18.664-0400 I INDEX [repl writer worker 13] build index on: db47.coll47 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db47.coll47" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.666-0400 m31201| 2015-07-09T14:14:18.664-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.667-0400 m31202| 2015-07-09T14:14:18.667-0400 I INDEX [repl writer worker 2] build index done. scanned 109 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.679-0400 m31101| 2015-07-09T14:14:18.678-0400 I INDEX [repl writer worker 11] build index on: db47.coll47 properties: { v: 1, key: { d: 1.0 }, name: "d_1", ns: "db47.coll47" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.679-0400 m31101| 2015-07-09T14:14:18.678-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.680-0400 m31102| 2015-07-09T14:14:18.678-0400 I INDEX [repl writer worker 15] build index done. scanned 91 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.683-0400 m31201| 2015-07-09T14:14:18.683-0400 I INDEX [repl writer worker 13] build index done. scanned 109 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.691-0400 m31101| 2015-07-09T14:14:18.691-0400 I INDEX [repl writer worker 11] build index done. scanned 91 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.706-0400 m30998| 2015-07-09T14:14:18.706-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63623 #294 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.707-0400 m30998| 2015-07-09T14:14:18.706-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63624 #295 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.712-0400 m30998| 2015-07-09T14:14:18.711-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63625 #296 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.720-0400 m30999| 2015-07-09T14:14:18.720-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63626 #295 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.724-0400 m30999| 2015-07-09T14:14:18.723-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63627 #296 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.734-0400 setting random seed: 1939852326177
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.736-0400 setting random seed: 9115258292295
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.736-0400 setting random seed: 4227520665153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.737-0400 setting random seed: 3098061238415
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.737-0400 setting random seed: 132934344001
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:18.738-0400 m30998| 2015-07-09T14:14:18.738-0400 I SHARDING [conn294] ChunkManager: time to load chunks for db47.coll47: 0ms sequenceNumber: 59 version: 2|5||559eb9f9ca4787b9985d1d65 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:22.453-0400 m30998| 2015-07-09T14:14:22.453-0400 I NETWORK [conn295] end connection 127.0.0.1:63624 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:23.557-0400 m30999| 2015-07-09T14:14:23.556-0400 I NETWORK [conn296] end connection 127.0.0.1:63627 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:23.589-0400 m30998| 2015-07-09T14:14:23.589-0400 I NETWORK [conn296] end connection 127.0.0.1:63625 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:23.840-0400 m30998| 2015-07-09T14:14:23.840-0400 I NETWORK [conn294] end connection 127.0.0.1:63623 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.260-0400 m30999| 2015-07-09T14:14:24.260-0400 I NETWORK [conn295] end connection 127.0.0.1:63626 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.273-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.273-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.274-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.274-0400 jstests/concurrency/fsm_workloads/yield_fetch.js: Workload completed in 5638 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.274-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.274-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.274-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.274-0400 m30999| 2015-07-09T14:14:24.274-0400 I COMMAND [conn1] DROP: db47.coll47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.274-0400 m30999| 2015-07-09T14:14:24.274-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:24.274-0400-559eba00ca4787b9985d1d67", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465664274), what: "dropCollection.start", ns: "db47.coll47", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.330-0400 m30999| 2015-07-09T14:14:24.329-0400 I SHARDING [conn1] distributed lock 'db47.coll47/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba00ca4787b9985d1d68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.331-0400 m31100| 2015-07-09T14:14:24.331-0400 I COMMAND [conn34] CMD: drop db47.coll47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.334-0400 m31200| 2015-07-09T14:14:24.334-0400 I COMMAND [conn84] CMD: drop db47.coll47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.336-0400 m31101| 2015-07-09T14:14:24.336-0400 I COMMAND [repl writer worker 14] CMD: drop db47.coll47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.336-0400 m31102| 2015-07-09T14:14:24.336-0400 I COMMAND [repl writer worker 3] CMD: drop db47.coll47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.338-0400 m31201| 2015-07-09T14:14:24.338-0400 I COMMAND [repl writer worker 10] CMD: drop db47.coll47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.339-0400 m31202| 2015-07-09T14:14:24.338-0400 I COMMAND [repl writer worker 7] CMD: drop db47.coll47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.392-0400 m31100| 2015-07-09T14:14:24.392-0400 I SHARDING [conn34] remotely refreshing metadata for db47.coll47 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eb9f9ca4787b9985d1d65, current metadata version is 2|3||559eb9f9ca4787b9985d1d65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.394-0400 m31100| 2015-07-09T14:14:24.393-0400 W SHARDING [conn34] no chunks found when reloading db47.coll47, previous version was 0|0||559eb9f9ca4787b9985d1d65, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.394-0400 m31100| 2015-07-09T14:14:24.393-0400 I SHARDING [conn34] dropping metadata for db47.coll47 at shard version 2|3||559eb9f9ca4787b9985d1d65, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.395-0400 m31200| 2015-07-09T14:14:24.395-0400 I SHARDING [conn84] remotely refreshing metadata for db47.coll47 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eb9f9ca4787b9985d1d65, current metadata version is 2|5||559eb9f9ca4787b9985d1d65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.396-0400 m31200| 2015-07-09T14:14:24.396-0400 W SHARDING [conn84] no chunks found when reloading db47.coll47, previous version was 0|0||559eb9f9ca4787b9985d1d65, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.397-0400 m31200| 2015-07-09T14:14:24.396-0400 I SHARDING [conn84] dropping metadata for db47.coll47 at shard version 2|5||559eb9f9ca4787b9985d1d65, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.398-0400 m30999| 2015-07-09T14:14:24.397-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:24.397-0400-559eba00ca4787b9985d1d69", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465664397), what: "dropCollection", ns: "db47.coll47", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.451-0400 m30999| 2015-07-09T14:14:24.451-0400 I SHARDING [conn1] distributed lock 'db47.coll47/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.507-0400 m30999| 2015-07-09T14:14:24.506-0400 I COMMAND [conn1] DROP DATABASE: db47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.507-0400 m30999| 2015-07-09T14:14:24.506-0400 I SHARDING [conn1] DBConfig::dropDatabase: db47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.507-0400 m30999| 2015-07-09T14:14:24.506-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:24.506-0400-559eba00ca4787b9985d1d6a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465664506), what: "dropDatabase.start", ns: "db47", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.613-0400 m30999| 2015-07-09T14:14:24.613-0400 I SHARDING [conn1] DBConfig::dropDatabase: db47 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.614-0400 m31100| 2015-07-09T14:14:24.614-0400 I COMMAND [conn28] dropDatabase db47 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.614-0400 m31100| 2015-07-09T14:14:24.614-0400 I COMMAND [conn28] dropDatabase db47 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.615-0400 m30999| 2015-07-09T14:14:24.614-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:24.614-0400-559eba00ca4787b9985d1d6b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465664614), what: "dropDatabase", ns: "db47", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.616-0400 m31101| 2015-07-09T14:14:24.615-0400 I COMMAND [repl writer worker 12] dropDatabase db47 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.616-0400 m31101| 2015-07-09T14:14:24.615-0400 I COMMAND [repl writer worker 12] dropDatabase db47 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.616-0400 m31102| 2015-07-09T14:14:24.615-0400 I COMMAND [repl writer worker 14] dropDatabase db47 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.616-0400 m31102| 2015-07-09T14:14:24.615-0400 I COMMAND [repl writer worker 14] dropDatabase db47 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.706-0400 m31100| 2015-07-09T14:14:24.706-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.707-0400 m31102| 2015-07-09T14:14:24.707-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.708-0400 m31101| 2015-07-09T14:14:24.708-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.740-0400 m31200| 2015-07-09T14:14:24.740-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400 jstests/concurrency/fsm_workloads/drop_database.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.744-0400 m31201| 2015-07-09T14:14:24.744-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.745-0400 m31202| 2015-07-09T14:14:24.744-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.751-0400 m30999| 2015-07-09T14:14:24.750-0400 I SHARDING [conn1] distributed lock 'db48/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba00ca4787b9985d1d6c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.754-0400 m30999| 2015-07-09T14:14:24.754-0400 I SHARDING [conn1] Placing [db48] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.754-0400 m30999| 2015-07-09T14:14:24.754-0400 I SHARDING [conn1] Enabling sharding for database [db48] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.808-0400 m30999| 2015-07-09T14:14:24.808-0400 I SHARDING [conn1] distributed lock 'db48/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.831-0400 m31100| 2015-07-09T14:14:24.831-0400 I INDEX [conn70] build index on: db48.coll48 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db48.coll48" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.831-0400 m31100| 2015-07-09T14:14:24.831-0400 I INDEX [conn70] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.839-0400 m31100| 2015-07-09T14:14:24.839-0400 I INDEX [conn70] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.840-0400 m30999| 2015-07-09T14:14:24.840-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db48.coll48", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.843-0400 m30999| 2015-07-09T14:14:24.843-0400 I SHARDING [conn1] distributed lock 'db48.coll48/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba00ca4787b9985d1d6d
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.844-0400 m30999| 2015-07-09T14:14:24.844-0400 I SHARDING [conn1] enable sharding on: db48.coll48 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.845-0400 m30999| 2015-07-09T14:14:24.844-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:24.844-0400-559eba00ca4787b9985d1d6e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465664844), what: "shardCollection.start", ns: "db48.coll48", details: { shardKey: { _id: "hashed" }, collection: "db48.coll48", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.850-0400 m31101| 2015-07-09T14:14:24.849-0400 I INDEX [repl writer worker 13] build index on: db48.coll48 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db48.coll48" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.850-0400 m31101| 2015-07-09T14:14:24.849-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.856-0400 m31101| 2015-07-09T14:14:24.856-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.859-0400 m31102| 2015-07-09T14:14:24.859-0400 I INDEX [repl writer worker 2] build index on: db48.coll48 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db48.coll48" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.859-0400 m31102| 2015-07-09T14:14:24.859-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.862-0400 m31102| 2015-07-09T14:14:24.862-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:24.897-0400 m30999| 2015-07-09T14:14:24.897-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db48.coll48 using new epoch 559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.005-0400 m30999| 2015-07-09T14:14:25.004-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db48.coll48: 0ms sequenceNumber: 210 version: 1|1||559eba00ca4787b9985d1d6f based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.061-0400 m30999| 2015-07-09T14:14:25.060-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db48.coll48: 0ms sequenceNumber: 211 version: 1|1||559eba00ca4787b9985d1d6f based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.062-0400 m31100| 2015-07-09T14:14:25.062-0400 I SHARDING [conn60] remotely refreshing metadata for db48.coll48 with requested shard version 1|1||559eba00ca4787b9985d1d6f, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.064-0400 m31100| 2015-07-09T14:14:25.063-0400 I SHARDING [conn60] collection db48.coll48 was previously unsharded, new metadata loaded with shard version 1|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.064-0400 m31100| 2015-07-09T14:14:25.064-0400 I SHARDING [conn60] collection version was loaded at version 1|1||559eba00ca4787b9985d1d6f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.065-0400 m30999| 2015-07-09T14:14:25.064-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.064-0400-559eba01ca4787b9985d1d70", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465665064), what: "shardCollection", ns: "db48.coll48", details: { version: "1|1||559eba00ca4787b9985d1d6f" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.117-0400 m30999| 2015-07-09T14:14:25.117-0400 I SHARDING [conn1] distributed lock 'db48.coll48/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.118-0400 m30999| 2015-07-09T14:14:25.117-0400 I SHARDING [conn1] moving chunk ns: db48.coll48 moving ( ns: db48.coll48, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.118-0400 m31100| 2015-07-09T14:14:25.118-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.119-0400 m31100| 2015-07-09T14:14:25.119-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba00ca4787b9985d1d6f') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.124-0400 m31100| 2015-07-09T14:14:25.123-0400 I SHARDING [conn34] distributed lock 'db48.coll48/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba01792e00bb672749fd
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.124-0400 m31100| 2015-07-09T14:14:25.123-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.123-0400-559eba01792e00bb672749fe", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465665123), what: "moveChunk.start", ns: "db48.coll48", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.177-0400 m31100| 2015-07-09T14:14:25.176-0400 I SHARDING [conn34] remotely refreshing metadata for db48.coll48 based on current shard version 1|1||559eba00ca4787b9985d1d6f, current metadata version is 1|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.178-0400 m31100| 2015-07-09T14:14:25.177-0400 I SHARDING [conn34] metadata of collection db48.coll48 already up to date (shard version : 1|1||559eba00ca4787b9985d1d6f, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.178-0400 m31100| 2015-07-09T14:14:25.178-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.178-0400 m31100| 2015-07-09T14:14:25.178-0400 I SHARDING [conn34] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.179-0400 m31200| 2015-07-09T14:14:25.179-0400 I SHARDING [conn16] remotely refreshing metadata for db48.coll48, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.180-0400 m31200| 2015-07-09T14:14:25.180-0400 I SHARDING [conn16] collection db48.coll48 was previously unsharded, new metadata loaded with shard version 0|0||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.180-0400 m31200| 2015-07-09T14:14:25.180-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba00ca4787b9985d1d6f, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.181-0400 m31200| 2015-07-09T14:14:25.180-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db48.coll48 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.183-0400 m31100| 2015-07-09T14:14:25.183-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.187-0400 m31100| 2015-07-09T14:14:25.186-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.191-0400 m31100| 2015-07-09T14:14:25.191-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.196-0400 m31200| 2015-07-09T14:14:25.195-0400 I INDEX [migrateThread] build index on: db48.coll48 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db48.coll48" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.196-0400 m31200| 2015-07-09T14:14:25.195-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.201-0400 m31100| 2015-07-09T14:14:25.200-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.205-0400 m31200| 2015-07-09T14:14:25.205-0400 I INDEX [migrateThread] build index on: db48.coll48 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db48.coll48" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.206-0400 m31200| 2015-07-09T14:14:25.205-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.218-0400 m31100| 2015-07-09T14:14:25.218-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.253-0400 m31100| 2015-07-09T14:14:25.252-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.271-0400 m31200| 2015-07-09T14:14:25.271-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.273-0400 m31200| 2015-07-09T14:14:25.272-0400 I SHARDING [migrateThread] Deleter starting delete for: db48.coll48 from { _id: 0 } -> { _id: MaxKey }, with opId: 84976
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.274-0400 m31200| 2015-07-09T14:14:25.273-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db48.coll48 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.280-0400 m31202| 2015-07-09T14:14:25.280-0400 I INDEX [repl writer worker 6] build index on: db48.coll48 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db48.coll48" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.281-0400 m31202| 2015-07-09T14:14:25.280-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.282-0400 m31201| 2015-07-09T14:14:25.280-0400 I INDEX [repl writer worker 1] build index on: db48.coll48 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db48.coll48" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.282-0400 m31201| 2015-07-09T14:14:25.280-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.288-0400 m31201| 2015-07-09T14:14:25.287-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.290-0400 m31200| 2015-07-09T14:14:25.290-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.291-0400 m31200| 2015-07-09T14:14:25.290-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db48.coll48' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.291-0400 m31202| 2015-07-09T14:14:25.290-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.319-0400 m31100| 2015-07-09T14:14:25.318-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.319-0400 m31100| 2015-07-09T14:14:25.318-0400 I SHARDING [conn34] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.319-0400 m31100| 2015-07-09T14:14:25.319-0400 I SHARDING [conn34] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.319-0400 m31100| 2015-07-09T14:14:25.319-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.326-0400 m31200| 2015-07-09T14:14:25.326-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db48.coll48' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.327-0400 m31200| 2015-07-09T14:14:25.326-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.326-0400-559eba01d5a107a5b9c0db46", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465665326), what: "moveChunk.to", ns: "db48.coll48", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 91, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 35, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.381-0400 m31100| 2015-07-09T14:14:25.380-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.381-0400 m31100| 2015-07-09T14:14:25.380-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559eba00ca4787b9985d1d6f through { _id: MinKey } -> { _id: 0 } for collection 'db48.coll48'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.382-0400 m31100| 2015-07-09T14:14:25.381-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.381-0400-559eba01792e00bb672749ff", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465665381), what: "moveChunk.commit", ns: "db48.coll48", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.435-0400 m31100| 2015-07-09T14:14:25.434-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.435-0400 m31100| 2015-07-09T14:14:25.435-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.435-0400 m31100| 2015-07-09T14:14:25.435-0400 I SHARDING [conn34] Deleter starting delete for: db48.coll48 from { _id: 0 } -> { _id: MaxKey }, with opId: 127696
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.436-0400 m31100| 2015-07-09T14:14:25.435-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db48.coll48 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.436-0400 m31100| 2015-07-09T14:14:25.435-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.437-0400 m31100| 2015-07-09T14:14:25.436-0400 I SHARDING [conn34] distributed lock 'db48.coll48/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.437-0400 m31100| 2015-07-09T14:14:25.436-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.436-0400-559eba01792e00bb67274a00", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465665436), what: "moveChunk.from", ns: "db48.coll48", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 137, step 5 of 6: 116, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.490-0400 m31100| 2015-07-09T14:14:25.489-0400 I COMMAND [conn34] command db48.coll48 command: moveChunk { moveChunk: "db48.coll48", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba00ca4787b9985d1d6f') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 371ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.492-0400 m30999| 2015-07-09T14:14:25.491-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db48.coll48: 0ms sequenceNumber: 212 version: 2|1||559eba00ca4787b9985d1d6f based on: 1|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.493-0400 m31100| 2015-07-09T14:14:25.492-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db48.coll48", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba00ca4787b9985d1d6f') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.497-0400 m31100| 2015-07-09T14:14:25.497-0400 I SHARDING [conn34] distributed lock 'db48.coll48/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba01792e00bb67274a01
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.497-0400 m31100| 2015-07-09T14:14:25.497-0400 I SHARDING [conn34] remotely refreshing metadata for db48.coll48 based on current shard version 2|0||559eba00ca4787b9985d1d6f, current metadata version is 2|0||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.498-0400 m31100| 2015-07-09T14:14:25.498-0400 I SHARDING [conn34] updating metadata for db48.coll48 from shard version 2|0||559eba00ca4787b9985d1d6f to shard version 2|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.499-0400 m31100| 2015-07-09T14:14:25.498-0400 I SHARDING [conn34] collection version was loaded at version 2|1||559eba00ca4787b9985d1d6f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.499-0400 m31100| 2015-07-09T14:14:25.498-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.500-0400 m31100| 2015-07-09T14:14:25.500-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.500-0400-559eba01792e00bb67274a02", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465665500), what: "split", ns: "db48.coll48", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba00ca4787b9985d1d6f') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba00ca4787b9985d1d6f') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.554-0400 m31100| 2015-07-09T14:14:25.553-0400 I SHARDING [conn34] distributed lock 'db48.coll48/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.556-0400 m30999| 2015-07-09T14:14:25.555-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db48.coll48: 0ms sequenceNumber: 213 version: 2|3||559eba00ca4787b9985d1d6f based on: 2|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.556-0400 m31200| 2015-07-09T14:14:25.556-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db48.coll48", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba00ca4787b9985d1d6f') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.561-0400 m31200| 2015-07-09T14:14:25.560-0400 I SHARDING [conn84] distributed lock 'db48.coll48/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba01d5a107a5b9c0db47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.561-0400 m31200| 2015-07-09T14:14:25.560-0400 I SHARDING [conn84] remotely refreshing metadata for db48.coll48 based on current shard version 0|0||559eba00ca4787b9985d1d6f, current metadata version is 1|1||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.562-0400 m31200| 2015-07-09T14:14:25.562-0400 I SHARDING [conn84] updating metadata for db48.coll48 from shard version 0|0||559eba00ca4787b9985d1d6f to shard version 2|0||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.563-0400 m31200| 2015-07-09T14:14:25.562-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eba00ca4787b9985d1d6f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.563-0400 m31200| 2015-07-09T14:14:25.562-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.565-0400 m31200| 2015-07-09T14:14:25.563-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.563-0400-559eba01d5a107a5b9c0db48", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436465665563), what: "split", ns: "db48.coll48", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba00ca4787b9985d1d6f') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba00ca4787b9985d1d6f') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.618-0400 m31200| 2015-07-09T14:14:25.618-0400 I SHARDING [conn84] distributed lock 'db48.coll48/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.620-0400 m30999| 2015-07-09T14:14:25.620-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db48.coll48: 0ms sequenceNumber: 214 version: 2|5||559eba00ca4787b9985d1d6f based on: 2|3||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.621-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.715-0400 m30999| 2015-07-09T14:14:25.715-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63630 #297 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.719-0400 m30998| 2015-07-09T14:14:25.715-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63629 #297 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.720-0400 m30998| 2015-07-09T14:14:25.719-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63633 #298 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.723-0400 m30998| 2015-07-09T14:14:25.723-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63634 #299 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.724-0400 m30999| 2015-07-09T14:14:25.723-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63631 #298 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.724-0400 m30999| 2015-07-09T14:14:25.724-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63632 #299 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.726-0400 m30998| 2015-07-09T14:14:25.726-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63635 #300 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.732-0400 m30999| 2015-07-09T14:14:25.732-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63636 #300 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.736-0400 m30999| 2015-07-09T14:14:25.736-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63637 #301 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.746-0400 m30998| 2015-07-09T14:14:25.746-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63638 #301 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.750-0400 setting random seed: 89513002894
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.751-0400 setting random seed: 7014356749132
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.751-0400 setting random seed: 8625025707297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.751-0400 setting random seed: 146189942024
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.752-0400 setting random seed: 6327614947222
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.752-0400 setting random seed: 293937562964
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.753-0400 setting random seed: 8224467481486
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.754-0400 setting random seed: 807130043394
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.755-0400 m30998| 2015-07-09T14:14:25.755-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba010bd550bed3408aaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.755-0400 setting random seed: 208623050712
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.756-0400 m30999| 2015-07-09T14:14:25.755-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba01ca4787b9985d1d71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.757-0400 m30998| 2015-07-09T14:14:25.757-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba010bd550bed3408aa9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.757-0400 setting random seed: 5434645651839
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.759-0400 m31100| 2015-07-09T14:14:25.757-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63639 #152 (95 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.760-0400 m30999| 2015-07-09T14:14:25.758-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba01ca4787b9985d1d72
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.760-0400 m30999| 2015-07-09T14:14:25.759-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba01ca4787b9985d1d73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.761-0400 m31200| 2015-07-09T14:14:25.760-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63641 #145 (88 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.761-0400 m30999| 2015-07-09T14:14:25.761-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba01ca4787b9985d1d74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.761-0400 m30999| 2015-07-09T14:14:25.761-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.763-0400 m31100| 2015-07-09T14:14:25.762-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63640 #153 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.763-0400 m31100| 2015-07-09T14:14:25.763-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63642 #154 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.763-0400 m31100| 2015-07-09T14:14:25.763-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63643 #155 (98 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.765-0400 m30999| 2015-07-09T14:14:25.765-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.767-0400 m30998| 2015-07-09T14:14:25.766-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.767-0400 m30998| 2015-07-09T14:14:25.766-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba010bd550bed3408aac
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.769-0400 m30999| 2015-07-09T14:14:25.767-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.769-0400 m30998| 2015-07-09T14:14:25.769-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.770-0400 m30999| 2015-07-09T14:14:25.769-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.770-0400 m29000| 2015-07-09T14:14:25.770-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63644 #59 (59 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.771-0400 m30998| 2015-07-09T14:14:25.770-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.772-0400 m29000| 2015-07-09T14:14:25.771-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63645 #60 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.772-0400 m29000| 2015-07-09T14:14:25.772-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63646 #61 (61 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.772-0400 m29000| 2015-07-09T14:14:25.772-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63647 #62 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.774-0400 m29000| 2015-07-09T14:14:25.774-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63648 #63 (63 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.776-0400 m30999| 2015-07-09T14:14:25.775-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.779-0400 m30999| 2015-07-09T14:14:25.778-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.780-0400 m30998| 2015-07-09T14:14:25.779-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba010bd550bed3408aab
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.780-0400 m30998| 2015-07-09T14:14:25.779-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba010bd550bed3408aad
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.782-0400 m29000| 2015-07-09T14:14:25.781-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63649 #64 (64 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.782-0400 m29000| 2015-07-09T14:14:25.782-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63650 #65 (65 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.783-0400 m30999| 2015-07-09T14:14:25.782-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.783-0400 m30998| 2015-07-09T14:14:25.782-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.784-0400 m30999| 2015-07-09T14:14:25.782-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.784-0400 m30998| 2015-07-09T14:14:25.783-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.784-0400 m31200| 2015-07-09T14:14:25.783-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63651 #146 (89 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.785-0400 m30999| 2015-07-09T14:14:25.783-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba01ca4787b9985d1d75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.785-0400 m30998| 2015-07-09T14:14:25.785-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.786-0400 m30998| 2015-07-09T14:14:25.785-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.787-0400 m30998| 2015-07-09T14:14:25.786-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.787-0400 m30999| 2015-07-09T14:14:25.786-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.811-0400 m30999| 2015-07-09T14:14:25.810-0400 I COMMAND [conn297] DROP DATABASE: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.812-0400 m30999| 2015-07-09T14:14:25.810-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.812-0400 m30999| 2015-07-09T14:14:25.810-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.810-0400-559eba01ca4787b9985d1d76", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465665810), what: "dropDatabase.start", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.837-0400 m30999| 2015-07-09T14:14:25.836-0400 I COMMAND [conn298] DROP DATABASE: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.837-0400 m30999| 2015-07-09T14:14:25.836-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.838-0400 m30999| 2015-07-09T14:14:25.836-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.836-0400-559eba01ca4787b9985d1d77", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465665836), what: "dropDatabase.start", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.842-0400 m30998| 2015-07-09T14:14:25.841-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.843-0400 m30999| 2015-07-09T14:14:25.843-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.844-0400 m30998| 2015-07-09T14:14:25.843-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.844-0400 m30998| 2015-07-09T14:14:25.844-0400 I COMMAND [conn298] DROP DATABASE: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.845-0400 m30998| 2015-07-09T14:14:25.844-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.846-0400 m30998| 2015-07-09T14:14:25.845-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.845-0400-559eba010bd550bed3408aae", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465665845), what: "dropDatabase.start", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.882-0400 m30999| 2015-07-09T14:14:25.880-0400 I COMMAND [conn299] DROP DATABASE: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.882-0400 m30999| 2015-07-09T14:14:25.880-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.882-0400 m30999| 2015-07-09T14:14:25.880-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.880-0400-559eba01ca4787b9985d1d78", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465665880), what: "dropDatabase.start", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.883-0400 m30999| 2015-07-09T14:14:25.883-0400 I COMMAND [conn301] DROP DATABASE: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.883-0400 m30999| 2015-07-09T14:14:25.883-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.883-0400 m30999| 2015-07-09T14:14:25.883-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.883-0400-559eba01ca4787b9985d1d79", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465665883), what: "dropDatabase.start", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.884-0400 m30998| 2015-07-09T14:14:25.883-0400 I COMMAND [conn299] DROP DATABASE: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.884-0400 m30998| 2015-07-09T14:14:25.883-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.885-0400 m30998| 2015-07-09T14:14:25.884-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.884-0400-559eba010bd550bed3408aaf", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465665884), what: "dropDatabase.start", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.889-0400 m30998| 2015-07-09T14:14:25.888-0400 I COMMAND [conn297] DROP DATABASE: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.889-0400 m30998| 2015-07-09T14:14:25.888-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.889-0400 m30998| 2015-07-09T14:14:25.888-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.888-0400-559eba010bd550bed3408ab0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465665888), what: "dropDatabase.start", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.904-0400 m30999| 2015-07-09T14:14:25.903-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.912-0400 m30999| 2015-07-09T14:14:25.911-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.913-0400 m31100| 2015-07-09T14:14:25.912-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63652 #156 (99 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.927-0400 m30999| 2015-07-09T14:14:25.927-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.928-0400 m31100| 2015-07-09T14:14:25.928-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63653 #157 (100 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.928-0400 m30998| 2015-07-09T14:14:25.928-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.929-0400 m31100| 2015-07-09T14:14:25.929-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63654 #158 (101 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.938-0400 m30998| 2015-07-09T14:14:25.938-0400 I COMMAND [conn300] DROP DATABASE: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.939-0400 m30998| 2015-07-09T14:14:25.938-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.939-0400 m30998| 2015-07-09T14:14:25.938-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.938-0400-559eba010bd550bed3408ab1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465665938), what: "dropDatabase.start", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.939-0400 m30998| 2015-07-09T14:14:25.938-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.940-0400 m31100| 2015-07-09T14:14:25.939-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63655 #159 (102 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.953-0400 m31100| 2015-07-09T14:14:25.951-0400 I COMMAND [conn57] command drop_database8.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.953-0400 m30999| 2015-07-09T14:14:25.951-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.953-0400 m31100| 2015-07-09T14:14:25.953-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63656 #160 (103 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.955-0400 m30999| 2015-07-09T14:14:25.955-0400 I COMMAND [conn300] DROP DATABASE: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.956-0400 m30999| 2015-07-09T14:14:25.955-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.956-0400 m30999| 2015-07-09T14:14:25.955-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.955-0400-559eba01ca4787b9985d1d7a", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465665955), what: "dropDatabase.start", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.959-0400 m30998| 2015-07-09T14:14:25.959-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.961-0400 m31100| 2015-07-09T14:14:25.961-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63657 #161 (104 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.969-0400 m31100| 2015-07-09T14:14:25.968-0400 I COMMAND [conn161] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.970-0400 m31100| 2015-07-09T14:14:25.968-0400 I COMMAND [conn58] command drop_database7.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 121ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.972-0400 m30998| 2015-07-09T14:14:25.971-0400 I COMMAND [conn301] DROP DATABASE: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.972-0400 m30998| 2015-07-09T14:14:25.972-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.973-0400 m30998| 2015-07-09T14:14:25.972-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.972-0400-559eba010bd550bed3408ab2", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465665972), what: "dropDatabase.start", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.978-0400 m31100| 2015-07-09T14:14:25.978-0400 I COMMAND [conn161] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.978-0400 m31100| 2015-07-09T14:14:25.978-0400 I COMMAND [conn160] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.979-0400 m30998| 2015-07-09T14:14:25.978-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.978-0400-559eba010bd550bed3408ab3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465665978), what: "dropDatabase", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.981-0400 m30998| 2015-07-09T14:14:25.981-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.982-0400 m30999| 2015-07-09T14:14:25.982-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.984-0400 m31100| 2015-07-09T14:14:25.983-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63658 #162 (105 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.998-0400 m30998| 2015-07-09T14:14:25.997-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.999-0400 m30998| 2015-07-09T14:14:25.998-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba010bd550bed3408ab4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.999-0400 m31100| 2015-07-09T14:14:25.998-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63659 #163 (106 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:25.999-0400 m31100| 2015-07-09T14:14:25.999-0400 I COMMAND [conn160] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.000-0400 m31100| 2015-07-09T14:14:25.999-0400 I COMMAND [conn162] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.000-0400 m30999| 2015-07-09T14:14:25.999-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:25.999-0400-559eba01ca4787b9985d1d7b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465665999), what: "dropDatabase", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.004-0400 m31100| 2015-07-09T14:14:26.003-0400 I COMMAND [conn162] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.004-0400 m31100| 2015-07-09T14:14:26.004-0400 I COMMAND [conn163] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.004-0400 m30999| 2015-07-09T14:14:26.004-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.004-0400-559eba02ca4787b9985d1d7c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465666004), what: "dropDatabase", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.005-0400 m31100| 2015-07-09T14:14:26.004-0400 I COMMAND [conn163] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.005-0400 m31100| 2015-07-09T14:14:26.005-0400 I COMMAND [conn161] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.007-0400 m30998| 2015-07-09T14:14:26.005-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.005-0400-559eba020bd550bed3408ab5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465666005), what: "dropDatabase", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.008-0400 m31100| 2015-07-09T14:14:26.008-0400 I COMMAND [conn161] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.008-0400 m31100| 2015-07-09T14:14:26.008-0400 I COMMAND [conn159] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.009-0400 m30998| 2015-07-09T14:14:26.008-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.008-0400-559eba020bd550bed3408ab6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465666008), what: "dropDatabase", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.010-0400 m31100| 2015-07-09T14:14:26.010-0400 I COMMAND [conn159] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.010-0400 m31100| 2015-07-09T14:14:26.010-0400 I COMMAND [conn158] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.011-0400 m30998| 2015-07-09T14:14:26.010-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.010-0400-559eba020bd550bed3408ab7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465666010), what: "dropDatabase", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.013-0400 m31100| 2015-07-09T14:14:26.013-0400 I COMMAND [conn158] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.013-0400 m31100| 2015-07-09T14:14:26.013-0400 I COMMAND [conn157] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.014-0400 m30998| 2015-07-09T14:14:26.013-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.013-0400-559eba020bd550bed3408ab8", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465666013), what: "dropDatabase", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.015-0400 m29000| 2015-07-09T14:14:26.014-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63660 #66 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.017-0400 m31100| 2015-07-09T14:14:26.017-0400 I COMMAND [conn157] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.017-0400 m31100| 2015-07-09T14:14:26.017-0400 I COMMAND [conn156] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.018-0400 m30999| 2015-07-09T14:14:26.017-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.017-0400-559eba02ca4787b9985d1d7d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465666017), what: "dropDatabase", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.021-0400 m31100| 2015-07-09T14:14:26.021-0400 I COMMAND [conn156] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.022-0400 m31100| 2015-07-09T14:14:26.021-0400 I COMMAND [conn156] command local.oplog.rs command: dropDatabase { dropDatabase: 1 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:110 locks:{ Global: { acquireCount: { r: 3, w: 2, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 102666 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 107ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.022-0400 m31100| 2015-07-09T14:14:26.022-0400 I COMMAND [conn28] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.023-0400 m30999| 2015-07-09T14:14:26.022-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.022-0400-559eba02ca4787b9985d1d7e", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465666022), what: "dropDatabase", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.027-0400 m31100| 2015-07-09T14:14:26.027-0400 I COMMAND [conn28] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.028-0400 m31100| 2015-07-09T14:14:26.027-0400 I COMMAND [conn28] command local.oplog.rs command: dropDatabase { dropDatabase: 1 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:110 locks:{ Global: { acquireCount: { r: 3, w: 2, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 118175 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 123ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.029-0400 m30999| 2015-07-09T14:14:26.028-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.028-0400-559eba02ca4787b9985d1d7f", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465666028), what: "dropDatabase", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.030-0400 m29000| 2015-07-09T14:14:26.029-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63661 #67 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.031-0400 m30998| 2015-07-09T14:14:26.030-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.033-0400 m29000| 2015-07-09T14:14:26.032-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63662 #68 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.082-0400 m30999| 2015-07-09T14:14:26.081-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.083-0400 m30998| 2015-07-09T14:14:26.082-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.084-0400 m30998| 2015-07-09T14:14:26.083-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408aba
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.087-0400 m30999| 2015-07-09T14:14:26.084-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.087-0400 m30998| 2015-07-09T14:14:26.085-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ab9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.088-0400 m29000| 2015-07-09T14:14:26.087-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63663 #69 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.089-0400 m30999| 2015-07-09T14:14:26.087-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.090-0400 m30999| 2015-07-09T14:14:26.087-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d84
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.090-0400 m30999| 2015-07-09T14:14:26.087-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.090-0400 m30999| 2015-07-09T14:14:26.089-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.091-0400 m30998| 2015-07-09T14:14:26.090-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408abb
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.091-0400 m30998| 2015-07-09T14:14:26.090-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.093-0400 m30998| 2015-07-09T14:14:26.092-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408abc
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.093-0400 m30999| 2015-07-09T14:14:26.092-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.095-0400 m30998| 2015-07-09T14:14:26.095-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.095-0400 m30999| 2015-07-09T14:14:26.095-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.096-0400 m31200| 2015-07-09T14:14:26.096-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63664 #147 (90 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.098-0400 m30998| 2015-07-09T14:14:26.098-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.099-0400 m30999| 2015-07-09T14:14:26.098-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.109-0400 m31101| 2015-07-09T14:14:26.109-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.115-0400 m31101| 2015-07-09T14:14:26.114-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.116-0400 m31101| 2015-07-09T14:14:26.115-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.118-0400 m31102| 2015-07-09T14:14:26.118-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.119-0400 m31102| 2015-07-09T14:14:26.119-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.120-0400 m31102| 2015-07-09T14:14:26.120-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.121-0400 m31101| 2015-07-09T14:14:26.121-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.123-0400 m31101| 2015-07-09T14:14:26.122-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.129-0400 m31102| 2015-07-09T14:14:26.129-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.130-0400 m31101| 2015-07-09T14:14:26.129-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.130-0400 m31102| 2015-07-09T14:14:26.130-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.131-0400 m31101| 2015-07-09T14:14:26.130-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.132-0400 m30999| 2015-07-09T14:14:26.132-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.133-0400 m30998| 2015-07-09T14:14:26.132-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.133-0400 m30998| 2015-07-09T14:14:26.133-0400 I COMMAND [conn297] DROP DATABASE: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.134-0400 m30998| 2015-07-09T14:14:26.133-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.134-0400 m30998| 2015-07-09T14:14:26.133-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.133-0400-559eba020bd550bed3408abd", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465666133), what: "dropDatabase.start", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.135-0400 m31101| 2015-07-09T14:14:26.133-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.135-0400 m31102| 2015-07-09T14:14:26.134-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.136-0400 m31101| 2015-07-09T14:14:26.134-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.136-0400 m31102| 2015-07-09T14:14:26.135-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.137-0400 m31102| 2015-07-09T14:14:26.136-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.137-0400 m31102| 2015-07-09T14:14:26.136-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.137-0400 m31102| 2015-07-09T14:14:26.137-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.138-0400 m31101| 2015-07-09T14:14:26.137-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.138-0400 m31102| 2015-07-09T14:14:26.138-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.139-0400 m31101| 2015-07-09T14:14:26.138-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.140-0400 m31101| 2015-07-09T14:14:26.140-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.140-0400 m31101| 2015-07-09T14:14:26.140-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.141-0400 m31101| 2015-07-09T14:14:26.141-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.141-0400 m31101| 2015-07-09T14:14:26.141-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.144-0400 m31102| 2015-07-09T14:14:26.142-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.144-0400 m31102| 2015-07-09T14:14:26.143-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.152-0400 m30998| 2015-07-09T14:14:26.152-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.157-0400 m31101| 2015-07-09T14:14:26.157-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.158-0400 m31101| 2015-07-09T14:14:26.158-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.163-0400 m31102| 2015-07-09T14:14:26.163-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.164-0400 m31102| 2015-07-09T14:14:26.163-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.167-0400 m30999| 2015-07-09T14:14:26.166-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.167-0400 m30998| 2015-07-09T14:14:26.166-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.176-0400 m30998| 2015-07-09T14:14:26.175-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.177-0400 m30998| 2015-07-09T14:14:26.177-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.178-0400 m30999| 2015-07-09T14:14:26.177-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.182-0400 m31101| 2015-07-09T14:14:26.180-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.182-0400 m31101| 2015-07-09T14:14:26.180-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.182-0400 m31102| 2015-07-09T14:14:26.181-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.183-0400 m31102| 2015-07-09T14:14:26.183-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.184-0400 m30998| 2015-07-09T14:14:26.183-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.185-0400 m29000| 2015-07-09T14:14:26.185-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63665 #70 (70 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.186-0400 m30999| 2015-07-09T14:14:26.184-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.191-0400 m31101| 2015-07-09T14:14:26.191-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.192-0400 m30999| 2015-07-09T14:14:26.191-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.192-0400 m30999| 2015-07-09T14:14:26.192-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.194-0400 m31102| 2015-07-09T14:14:26.194-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.196-0400 m31102| 2015-07-09T14:14:26.195-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.203-0400 m31102| 2015-07-09T14:14:26.202-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.221-0400 m30998| 2015-07-09T14:14:26.220-0400 I COMMAND [conn299] DROP DATABASE: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.221-0400 m30998| 2015-07-09T14:14:26.220-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.221-0400 m30998| 2015-07-09T14:14:26.220-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.220-0400-559eba020bd550bed3408abe", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465666220), what: "dropDatabase.start", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.242-0400 m30999| 2015-07-09T14:14:26.241-0400 I COMMAND [conn299] DROP DATABASE: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.243-0400 m30999| 2015-07-09T14:14:26.241-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.243-0400 m30999| 2015-07-09T14:14:26.241-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.241-0400-559eba02ca4787b9985d1d85", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465666241), what: "dropDatabase.start", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.246-0400 m30998| 2015-07-09T14:14:26.245-0400 I COMMAND [conn301] DROP DATABASE: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.246-0400 m30998| 2015-07-09T14:14:26.245-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.247-0400 m30998| 2015-07-09T14:14:26.246-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.245-0400-559eba020bd550bed3408abf", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465666246), what: "dropDatabase.start", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.251-0400 m30998| 2015-07-09T14:14:26.250-0400 I COMMAND [conn300] DROP DATABASE: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.252-0400 m30998| 2015-07-09T14:14:26.250-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.253-0400 m30998| 2015-07-09T14:14:26.250-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.250-0400-559eba020bd550bed3408ac0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465666250), what: "dropDatabase.start", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.254-0400 m31100| 2015-07-09T14:14:26.253-0400 I COMMAND [conn158] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.255-0400 m30999| 2015-07-09T14:14:26.255-0400 I COMMAND [conn300] DROP DATABASE: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.256-0400 m30999| 2015-07-09T14:14:26.255-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.256-0400 m31100| 2015-07-09T14:14:26.255-0400 I COMMAND [conn158] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.257-0400 m30999| 2015-07-09T14:14:26.255-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.255-0400-559eba02ca4787b9985d1d86", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465666255), what: "dropDatabase.start", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.257-0400 m30998| 2015-07-09T14:14:26.256-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.256-0400-559eba020bd550bed3408ac1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465666256), what: "dropDatabase", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.307-0400 m30998| 2015-07-09T14:14:26.306-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.325-0400 m30998| 2015-07-09T14:14:26.324-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ac2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.327-0400 m31100| 2015-07-09T14:14:26.326-0400 I COMMAND [conn51] command drop_database2.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 69566 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 139ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.328-0400 m31100| 2015-07-09T14:14:26.326-0400 I COMMAND [conn45] command drop_database6.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 60594 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 131ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.328-0400 m30999| 2015-07-09T14:14:26.328-0400 I COMMAND [conn301] DROP DATABASE: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.328-0400 m30999| 2015-07-09T14:14:26.328-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.329-0400 m30999| 2015-07-09T14:14:26.328-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.328-0400-559eba02ca4787b9985d1d87", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465666328), what: "dropDatabase.start", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.330-0400 m30999| 2015-07-09T14:14:26.330-0400 I COMMAND [conn297] DROP DATABASE: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.330-0400 m30999| 2015-07-09T14:14:26.330-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.331-0400 m30999| 2015-07-09T14:14:26.330-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.330-0400-559eba02ca4787b9985d1d88", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465666330), what: "dropDatabase.start", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.336-0400 m30999| 2015-07-09T14:14:26.336-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.337-0400 m30998| 2015-07-09T14:14:26.334-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.344-0400 m30998| 2015-07-09T14:14:26.343-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.344-0400 m30999| 2015-07-09T14:14:26.343-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.345-0400 m30999| 2015-07-09T14:14:26.345-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.345-0400 m30999| 2015-07-09T14:14:26.345-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.352-0400 m31100| 2015-07-09T14:14:26.350-0400 I COMMAND [conn60] command drop_database0.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 62080 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 156ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.353-0400 m31100| 2015-07-09T14:14:26.351-0400 I COMMAND [conn72] command drop_database1.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 70994 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 166ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.353-0400 m31100| 2015-07-09T14:14:26.352-0400 I COMMAND [conn162] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.353-0400 m30999| 2015-07-09T14:14:26.352-0400 I COMMAND [conn298] DROP DATABASE: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.354-0400 m30999| 2015-07-09T14:14:26.352-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.354-0400 m30999| 2015-07-09T14:14:26.352-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.352-0400-559eba02ca4787b9985d1d89", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465666352), what: "dropDatabase.start", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.354-0400 m31100| 2015-07-09T14:14:26.353-0400 I COMMAND [conn162] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.355-0400 m31100| 2015-07-09T14:14:26.354-0400 I COMMAND [conn157] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.355-0400 m30999| 2015-07-09T14:14:26.354-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.354-0400-559eba02ca4787b9985d1d8a", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465666354), what: "dropDatabase", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.356-0400 m30998| 2015-07-09T14:14:26.355-0400 I COMMAND [conn298] DROP DATABASE: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.356-0400 m30998| 2015-07-09T14:14:26.355-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.356-0400 m30998| 2015-07-09T14:14:26.355-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.355-0400-559eba020bd550bed3408ac3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465666355), what: "dropDatabase.start", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.357-0400 m31100| 2015-07-09T14:14:26.357-0400 I COMMAND [conn157] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.359-0400 m31100| 2015-07-09T14:14:26.357-0400 I COMMAND [conn161] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.359-0400 m30999| 2015-07-09T14:14:26.358-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.358-0400-559eba02ca4787b9985d1d8b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465666358), what: "dropDatabase", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.364-0400 m31100| 2015-07-09T14:14:26.363-0400 I COMMAND [conn161] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.364-0400 m31100| 2015-07-09T14:14:26.363-0400 I COMMAND [conn156] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.365-0400 m30998| 2015-07-09T14:14:26.363-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.363-0400-559eba020bd550bed3408ac4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465666363), what: "dropDatabase", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.373-0400 m31102| 2015-07-09T14:14:26.372-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.373-0400 m31100| 2015-07-09T14:14:26.372-0400 I COMMAND [conn156] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.374-0400 m31100| 2015-07-09T14:14:26.373-0400 I COMMAND [conn159] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.375-0400 m30999| 2015-07-09T14:14:26.373-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.373-0400-559eba02ca4787b9985d1d8c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465666373), what: "dropDatabase", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.375-0400 m31100| 2015-07-09T14:14:26.374-0400 I COMMAND [conn159] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.376-0400 m31100| 2015-07-09T14:14:26.375-0400 I COMMAND [conn28] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.376-0400 m30998| 2015-07-09T14:14:26.375-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.375-0400-559eba020bd550bed3408ac5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465666375), what: "dropDatabase", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.376-0400 m31101| 2015-07-09T14:14:26.375-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.377-0400 m31102| 2015-07-09T14:14:26.375-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.377-0400 m31100| 2015-07-09T14:14:26.376-0400 I COMMAND [conn28] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.377-0400 m31100| 2015-07-09T14:14:26.377-0400 I COMMAND [conn158] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.377-0400 m30999| 2015-07-09T14:14:26.377-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.377-0400-559eba02ca4787b9985d1d8d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465666377), what: "dropDatabase", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.378-0400 m31101| 2015-07-09T14:14:26.377-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.379-0400 m31100| 2015-07-09T14:14:26.378-0400 I COMMAND [conn158] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.380-0400 m30998| 2015-07-09T14:14:26.378-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.378-0400-559eba020bd550bed3408ac6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465666378), what: "dropDatabase", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.383-0400 m30998| 2015-07-09T14:14:26.383-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.429-0400 m30999| 2015-07-09T14:14:26.429-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.430-0400 m31100| 2015-07-09T14:14:26.429-0400 I COMMAND [conn28] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.438-0400 m31100| 2015-07-09T14:14:26.437-0400 I COMMAND [conn28] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.439-0400 m30999| 2015-07-09T14:14:26.438-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.438-0400-559eba02ca4787b9985d1d90", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465666438), what: "dropDatabase", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.446-0400 m30998| 2015-07-09T14:14:26.443-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.447-0400 m31100| 2015-07-09T14:14:26.444-0400 I COMMAND [conn158] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.448-0400 m30999| 2015-07-09T14:14:26.444-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d8e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.448-0400 m31102| 2015-07-09T14:14:26.447-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.449-0400 m30998| 2015-07-09T14:14:26.447-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.450-0400 m31102| 2015-07-09T14:14:26.449-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.451-0400 m31100| 2015-07-09T14:14:26.449-0400 I COMMAND [conn158] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.452-0400 m31102| 2015-07-09T14:14:26.452-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.452-0400 m30998| 2015-07-09T14:14:26.452-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.452-0400-559eba020bd550bed3408aca", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465666452), what: "dropDatabase", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.455-0400 m30999| 2015-07-09T14:14:26.453-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d8f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.455-0400 m30999| 2015-07-09T14:14:26.453-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d91
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.455-0400 m30999| 2015-07-09T14:14:26.454-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d92
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.455-0400 m30998| 2015-07-09T14:14:26.454-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ac7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.456-0400 m30998| 2015-07-09T14:14:26.454-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ac8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.456-0400 m30999| 2015-07-09T14:14:26.454-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.456-0400 m31102| 2015-07-09T14:14:26.456-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.457-0400 m31102| 2015-07-09T14:14:26.456-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.458-0400 m31102| 2015-07-09T14:14:26.458-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.460-0400 m31102| 2015-07-09T14:14:26.459-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.461-0400 m30998| 2015-07-09T14:14:26.461-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ac9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.462-0400 m31100| 2015-07-09T14:14:26.462-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63666 #164 (107 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.464-0400 m30999| 2015-07-09T14:14:26.463-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.464-0400 m31102| 2015-07-09T14:14:26.464-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.466-0400 m30999| 2015-07-09T14:14:26.465-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.467-0400 m31102| 2015-07-09T14:14:26.465-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.467-0400 m31100| 2015-07-09T14:14:26.466-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63667 #165 (108 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.468-0400 m30998| 2015-07-09T14:14:26.467-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408acb
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.469-0400 m31100| 2015-07-09T14:14:26.469-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63668 #166 (109 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.470-0400 m31102| 2015-07-09T14:14:26.469-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.471-0400 m31102| 2015-07-09T14:14:26.470-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.472-0400 m31101| 2015-07-09T14:14:26.470-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.472-0400 m31101| 2015-07-09T14:14:26.472-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.473-0400 m31101| 2015-07-09T14:14:26.473-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.474-0400 m31102| 2015-07-09T14:14:26.473-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.475-0400 m31102| 2015-07-09T14:14:26.474-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.475-0400 m31101| 2015-07-09T14:14:26.474-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.476-0400 m30998| 2015-07-09T14:14:26.475-0400 I COMMAND [conn297] DROP DATABASE: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.476-0400 m30998| 2015-07-09T14:14:26.475-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.477-0400 m31101| 2015-07-09T14:14:26.475-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.477-0400 m30998| 2015-07-09T14:14:26.475-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.475-0400-559eba020bd550bed3408acc", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465666475), what: "dropDatabase.start", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.478-0400 m31102| 2015-07-09T14:14:26.477-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.478-0400 m31101| 2015-07-09T14:14:26.477-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.478-0400 m31102| 2015-07-09T14:14:26.478-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.478-0400 m31101| 2015-07-09T14:14:26.478-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.479-0400 m31102| 2015-07-09T14:14:26.479-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.479-0400 m31101| 2015-07-09T14:14:26.479-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.480-0400 m31102| 2015-07-09T14:14:26.479-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.480-0400 m31101| 2015-07-09T14:14:26.480-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.481-0400 m31101| 2015-07-09T14:14:26.481-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.485-0400 m31102| 2015-07-09T14:14:26.484-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.485-0400 m31101| 2015-07-09T14:14:26.484-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.485-0400 m30999| 2015-07-09T14:14:26.484-0400 I COMMAND [conn297] DROP DATABASE: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.485-0400 m30999| 2015-07-09T14:14:26.484-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.486-0400 m30999| 2015-07-09T14:14:26.484-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.484-0400-559eba02ca4787b9985d1d94", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465666484), what: "dropDatabase.start", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.486-0400 m31101| 2015-07-09T14:14:26.485-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.487-0400 m31101| 2015-07-09T14:14:26.486-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.490-0400 m30999| 2015-07-09T14:14:26.490-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.491-0400 m31200| 2015-07-09T14:14:26.490-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63669 #148 (91 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.492-0400 m30999| 2015-07-09T14:14:26.490-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.492-0400 m30998| 2015-07-09T14:14:26.490-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.492-0400 m31101| 2015-07-09T14:14:26.491-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.492-0400 m30998| 2015-07-09T14:14:26.491-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.493-0400 m30998| 2015-07-09T14:14:26.491-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.493-0400 m30999| 2015-07-09T14:14:26.492-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.493-0400 m31101| 2015-07-09T14:14:26.493-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.493-0400 m30999| 2015-07-09T14:14:26.493-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.494-0400 m30998| 2015-07-09T14:14:26.493-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.495-0400 m31101| 2015-07-09T14:14:26.495-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.496-0400 m31101| 2015-07-09T14:14:26.496-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.497-0400 m31101| 2015-07-09T14:14:26.497-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.540-0400 m30998| 2015-07-09T14:14:26.539-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.546-0400 m30999| 2015-07-09T14:14:26.545-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.546-0400 m30998| 2015-07-09T14:14:26.545-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.546-0400 m30999| 2015-07-09T14:14:26.545-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.553-0400 m30999| 2015-07-09T14:14:26.552-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.553-0400 m30998| 2015-07-09T14:14:26.553-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.554-0400 m30999| 2015-07-09T14:14:26.553-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.554-0400 m30999| 2015-07-09T14:14:26.553-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.554-0400 m30998| 2015-07-09T14:14:26.553-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.556-0400 m30998| 2015-07-09T14:14:26.556-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.561-0400 m31100| 2015-07-09T14:14:26.561-0400 I COMMAND [conn28] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.562-0400 m30998| 2015-07-09T14:14:26.562-0400 I COMMAND [conn298] DROP DATABASE: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.562-0400 m30998| 2015-07-09T14:14:26.562-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.562-0400 m30998| 2015-07-09T14:14:26.562-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.562-0400-559eba020bd550bed3408acd", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465666562), what: "dropDatabase.start", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.565-0400 m31100| 2015-07-09T14:14:26.565-0400 I COMMAND [conn28] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.566-0400 m31100| 2015-07-09T14:14:26.565-0400 I COMMAND [conn158] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.566-0400 m30999| 2015-07-09T14:14:26.565-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.565-0400-559eba02ca4787b9985d1d95", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465666565), what: "dropDatabase", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.568-0400 m31100| 2015-07-09T14:14:26.568-0400 I COMMAND [conn158] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.569-0400 m30998| 2015-07-09T14:14:26.569-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.569-0400-559eba020bd550bed3408ace", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465666569), what: "dropDatabase", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.596-0400 m31101| 2015-07-09T14:14:26.595-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.596-0400 m31101| 2015-07-09T14:14:26.596-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.597-0400 m31101| 2015-07-09T14:14:26.597-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.601-0400 m31101| 2015-07-09T14:14:26.599-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.606-0400 m31102| 2015-07-09T14:14:26.606-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.607-0400 m31102| 2015-07-09T14:14:26.607-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.608-0400 m31102| 2015-07-09T14:14:26.608-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.611-0400 m31102| 2015-07-09T14:14:26.610-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.634-0400 m30998| 2015-07-09T14:14:26.630-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.634-0400 m30999| 2015-07-09T14:14:26.632-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.634-0400 m30998| 2015-07-09T14:14:26.633-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408acf
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.660-0400 m30999| 2015-07-09T14:14:26.659-0400 I COMMAND [conn298] DROP DATABASE: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.660-0400 m30999| 2015-07-09T14:14:26.659-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.661-0400 m30999| 2015-07-09T14:14:26.659-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.659-0400-559eba02ca4787b9985d1d97", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465666659), what: "dropDatabase.start", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.669-0400 m31100| 2015-07-09T14:14:26.668-0400 I COMMAND [conn60] command drop_database2.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 22194 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 121ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.670-0400 m31100| 2015-07-09T14:14:26.668-0400 I COMMAND [conn46] command drop_database3.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 11776 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.671-0400 m30999| 2015-07-09T14:14:26.670-0400 I COMMAND [conn301] DROP DATABASE: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.671-0400 m30999| 2015-07-09T14:14:26.670-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.671-0400 m30999| 2015-07-09T14:14:26.670-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.670-0400-559eba02ca4787b9985d1d98", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465666670), what: "dropDatabase.start", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.671-0400 m30998| 2015-07-09T14:14:26.670-0400 I COMMAND [conn299] DROP DATABASE: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.672-0400 m30998| 2015-07-09T14:14:26.670-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.672-0400 m30998| 2015-07-09T14:14:26.670-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.670-0400-559eba020bd550bed3408ad0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465666670), what: "dropDatabase.start", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.676-0400 m31100| 2015-07-09T14:14:26.675-0400 I COMMAND [conn45] command drop_database4.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 15115 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 121ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.677-0400 m30999| 2015-07-09T14:14:26.677-0400 I COMMAND [conn299] DROP DATABASE: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.678-0400 m30999| 2015-07-09T14:14:26.677-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.678-0400 m30999| 2015-07-09T14:14:26.677-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.677-0400-559eba02ca4787b9985d1d99", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465666677), what: "dropDatabase.start", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.694-0400 m31100| 2015-07-09T14:14:26.693-0400 I COMMAND [conn48] command drop_database5.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 13856 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1
} } } protocol:op_command 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.696-0400 m30998| 2015-07-09T14:14:26.695-0400 I COMMAND [conn300] DROP DATABASE: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.696-0400 m30998| 2015-07-09T14:14:26.695-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.696-0400 m30998| 2015-07-09T14:14:26.695-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.695-0400-559eba020bd550bed3408ad1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465666695), what: "dropDatabase.start", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.711-0400 m31100| 2015-07-09T14:14:26.701-0400 I COMMAND [conn20] command drop_database8.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 11736 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.712-0400 m31100| 2015-07-09T14:14:26.703-0400 I COMMAND [conn158] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.712-0400 m31100| 2015-07-09T14:14:26.703-0400 I COMMAND [conn58] command drop_database7.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 9324 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 143ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.713-0400 m30998| 2015-07-09T14:14:26.705-0400 I COMMAND [conn301] DROP DATABASE: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.713-0400 m30999| 2015-07-09T14:14:26.705-0400 I COMMAND [conn300] DROP DATABASE: drop_database8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.713-0400 m30999| 2015-07-09T14:14:26.705-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.713-0400 m30999| 2015-07-09T14:14:26.705-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.705-0400-559eba02ca4787b9985d1d9a", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465666705), what: "dropDatabase.start", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.715-0400 m30998| 2015-07-09T14:14:26.705-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.716-0400 m30998| 2015-07-09T14:14:26.705-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.705-0400-559eba020bd550bed3408ad2", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465666705), what: "dropDatabase.start", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.716-0400 m31100| 2015-07-09T14:14:26.706-0400 I COMMAND [conn158] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.716-0400 
m30998| 2015-07-09T14:14:26.708-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.708-0400-559eba020bd550bed3408ad3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465666708), what: "dropDatabase", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.716-0400 m30999| 2015-07-09T14:14:26.709-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.716-0400 m30998| 2015-07-09T14:14:26.713-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.722-0400 m30999| 2015-07-09T14:14:26.721-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.722-0400 m30999| 2015-07-09T14:14:26.721-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.727-0400 m30998| 2015-07-09T14:14:26.726-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.733-0400 m31100| 2015-07-09T14:14:26.733-0400 I COMMAND [conn28] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.744-0400 m31100| 2015-07-09T14:14:26.740-0400 I COMMAND [conn28] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.744-0400 m30999| 2015-07-09T14:14:26.743-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.743-0400-559eba02ca4787b9985d1d9b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465666743), what: "dropDatabase", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.751-0400 m30998| 2015-07-09T14:14:26.749-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.751-0400 m30998| 2015-07-09T14:14:26.749-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.760-0400 m30999| 2015-07-09T14:14:26.759-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.763-0400 m30999| 2015-07-09T14:14:26.763-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.766-0400 m30999| 2015-07-09T14:14:26.765-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.767-0400 m30998| 2015-07-09T14:14:26.766-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.773-0400 m30999| 2015-07-09T14:14:26.772-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1d9c [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.774-0400 m30998| 2015-07-09T14:14:26.773-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 
559eba020bd550bed3408ad4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.790-0400 m30999| 2015-07-09T14:14:26.789-0400 I COMMAND [conn297] DROP DATABASE: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.790-0400 m30999| 2015-07-09T14:14:26.789-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.791-0400 m30999| 2015-07-09T14:14:26.790-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.789-0400-559eba02ca4787b9985d1d9d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465666790), what: "dropDatabase.start", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.794-0400 m31100| 2015-07-09T14:14:26.793-0400 I COMMAND [conn161] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.795-0400 m31100| 2015-07-09T14:14:26.795-0400 I COMMAND [conn161] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.796-0400 m31100| 2015-07-09T14:14:26.795-0400 I COMMAND [conn157] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.796-0400 m30998| 2015-07-09T14:14:26.795-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.795-0400-559eba020bd550bed3408ad5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465666795), what: "dropDatabase", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.797-0400 m30998| 2015-07-09T14:14:26.796-0400 I COMMAND [conn297] DROP DATABASE: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.797-0400 m30998| 2015-07-09T14:14:26.796-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.797-0400 m30998| 2015-07-09T14:14:26.796-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.796-0400-559eba020bd550bed3408ad6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465666796), what: "dropDatabase.start", ns: "drop_database9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.798-0400 m31100| 2015-07-09T14:14:26.798-0400 I COMMAND [conn157] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.798-0400 m30999| 2015-07-09T14:14:26.798-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.798-0400-559eba02ca4787b9985d1d9e", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465666798), what: "dropDatabase", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.799-0400 m31100| 2015-07-09T14:14:26.798-0400 I COMMAND [conn156] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.800-0400 m31100| 2015-07-09T14:14:26.799-0400 I COMMAND [conn156] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.802-0400 m31100| 2015-07-09T14:14:26.800-0400 I COMMAND [conn28] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.803-0400 m31100| 2015-07-09T14:14:26.801-0400 I COMMAND [conn28] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.803-0400 m30999| 2015-07-09T14:14:26.801-0400 
I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.801-0400-559eba02ca4787b9985d1d9f", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465666801), what: "dropDatabase", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.803-0400 m30999| 2015-07-09T14:14:26.801-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.801-0400-559eba02ca4787b9985d1da0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465666801), what: "dropDatabase", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.804-0400 m31100| 2015-07-09T14:14:26.801-0400 I COMMAND [conn159] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.810-0400 m31100| 2015-07-09T14:14:26.809-0400 I COMMAND [conn159] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.810-0400 m31100| 2015-07-09T14:14:26.809-0400 I COMMAND [conn158] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.811-0400 m30998| 2015-07-09T14:14:26.809-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.809-0400-559eba020bd550bed3408ad7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465666809), what: "dropDatabase", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.815-0400 m31100| 2015-07-09T14:14:26.814-0400 I COMMAND [conn158] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.817-0400 m30998| 2015-07-09T14:14:26.816-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.815-0400-559eba020bd550bed3408ad8", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465666816), what: "dropDatabase", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.822-0400 m30998| 2015-07-09T14:14:26.821-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.823-0400 m30999| 2015-07-09T14:14:26.823-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.839-0400 m31102| 2015-07-09T14:14:26.839-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.843-0400 m31102| 2015-07-09T14:14:26.842-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.844-0400 m31102| 2015-07-09T14:14:26.843-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.846-0400 m31102| 2015-07-09T14:14:26.845-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.867-0400 m30999| 2015-07-09T14:14:26.865-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.868-0400 m31101| 2015-07-09T14:14:26.868-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.869-0400 m30998| 2015-07-09T14:14:26.869-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.875-0400 m31101| 2015-07-09T14:14:26.872-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.875-0400 m31101| 2015-07-09T14:14:26.874-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.877-0400 m30998| 2015-07-09T14:14:26.876-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.878-0400 m30999| 2015-07-09T14:14:26.877-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1da1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.882-0400 m31101| 2015-07-09T14:14:26.881-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.885-0400 m30999| 2015-07-09T14:14:26.883-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.890-0400 m31100| 2015-07-09T14:14:26.888-0400 I COMMAND [conn28] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.891-0400 m30999| 2015-07-09T14:14:26.889-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1da3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.891-0400 m30998| 2015-07-09T14:14:26.889-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408adb [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.891-0400 m30998| 2015-07-09T14:14:26.890-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ada [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.894-0400 m30999| 2015-07-09T14:14:26.891-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1da2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.895-0400 m31102| 2015-07-09T14:14:26.893-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.896-0400 m30999| 2015-07-09T14:14:26.894-0400 I COMMAND [conn298] DROP DATABASE: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.896-0400 m30998| 2015-07-09T14:14:26.894-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ad9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.896-0400 m30999| 2015-07-09T14:14:26.894-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.899-0400 m30999| 2015-07-09T14:14:26.894-0400 I SHARDING [conn298] about to log metadata event: { _id: 
"bs-osx108-8-2015-07-09T14:14:26.894-0400-559eba02ca4787b9985d1da4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465666894), what: "dropDatabase.start", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.899-0400 m31102| 2015-07-09T14:14:26.895-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.899-0400 m31102| 2015-07-09T14:14:26.896-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.899-0400 m31100| 2015-07-09T14:14:26.897-0400 I COMMAND [conn28] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.899-0400 m31100| 2015-07-09T14:14:26.897-0400 I COMMAND [conn158] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.900-0400 m30999| 2015-07-09T14:14:26.897-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.897-0400-559eba02ca4787b9985d1da5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465666897), what: "dropDatabase", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.900-0400 m31102| 2015-07-09T14:14:26.898-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.900-0400 m31102| 2015-07-09T14:14:26.899-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.900-0400 m31100| 2015-07-09T14:14:26.900-0400 I COMMAND [conn158] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.901-0400 m31102| 2015-07-09T14:14:26.900-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.902-0400 m31102| 2015-07-09T14:14:26.901-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.902-0400 m30998| 2015-07-09T14:14:26.901-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.901-0400-559eba020bd550bed3408adc", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465666901), what: "dropDatabase", ns: "drop_database9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.907-0400 m31102| 2015-07-09T14:14:26.906-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.908-0400 m31102| 2015-07-09T14:14:26.907-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.910-0400 m31102| 2015-07-09T14:14:26.908-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.910-0400 m31102| 2015-07-09T14:14:26.909-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.910-0400 m31101| 2015-07-09T14:14:26.910-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.912-0400 m31101| 2015-07-09T14:14:26.912-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database3 finished 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.913-0400 m31101| 2015-07-09T14:14:26.912-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.913-0400 m31102| 2015-07-09T14:14:26.913-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.917-0400 m31101| 2015-07-09T14:14:26.915-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.917-0400 m31101| 2015-07-09T14:14:26.916-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.919-0400 m31101| 2015-07-09T14:14:26.918-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.921-0400 m31101| 2015-07-09T14:14:26.920-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.922-0400 m30998| 2015-07-09T14:14:26.921-0400 I COMMAND [conn298] DROP DATABASE: drop_database1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.922-0400 m30998| 2015-07-09T14:14:26.921-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.923-0400 m30998| 2015-07-09T14:14:26.921-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.921-0400-559eba020bd550bed3408add", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465666921), what: "dropDatabase.start", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.924-0400 m30999| 2015-07-09T14:14:26.924-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.925-0400 m30998| 2015-07-09T14:14:26.925-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.926-0400 m31200| 2015-07-09T14:14:26.925-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63670 #149 (92 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.926-0400 m30999| 2015-07-09T14:14:26.925-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.926-0400 m30998| 2015-07-09T14:14:26.926-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.927-0400 m30999| 2015-07-09T14:14:26.926-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.927-0400 m31101| 2015-07-09T14:14:26.926-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.927-0400 m30998| 2015-07-09T14:14:26.926-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.927-0400 m31101| 2015-07-09T14:14:26.927-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.929-0400 m31101| 2015-07-09T14:14:26.929-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.930-0400 m31101| 2015-07-09T14:14:26.930-0400 I 
COMMAND [repl writer worker 3] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.931-0400 m31101| 2015-07-09T14:14:26.931-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.932-0400 m31102| 2015-07-09T14:14:26.932-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.936-0400 m31102| 2015-07-09T14:14:26.935-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.936-0400 m31102| 2015-07-09T14:14:26.936-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.939-0400 m31102| 2015-07-09T14:14:26.938-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.942-0400 m31101| 2015-07-09T14:14:26.941-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.943-0400 m31101| 2015-07-09T14:14:26.943-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.943-0400 m31101| 2015-07-09T14:14:26.943-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.947-0400 m31101| 2015-07-09T14:14:26.946-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.975-0400 m30999| 2015-07-09T14:14:26.974-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.976-0400 m30998| 2015-07-09T14:14:26.975-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.977-0400 m31100| 2015-07-09T14:14:26.975-0400 I COMMAND [conn28] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.982-0400 m30999| 2015-07-09T14:14:26.979-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1da6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.984-0400 m30998| 2015-07-09T14:14:26.979-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba020bd550bed3408ade [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.984-0400 m31100| 2015-07-09T14:14:26.980-0400 I COMMAND [conn28] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.985-0400 m30999| 2015-07-09T14:14:26.980-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:26.980-0400-559eba02ca4787b9985d1da7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465666980), what: "dropDatabase", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.985-0400 m31102| 2015-07-09T14:14:26.981-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.987-0400 m31101| 2015-07-09T14:14:26.981-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.987-0400 m30999| 2015-07-09T14:14:26.983-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.988-0400 m31101| 2015-07-09T14:14:26.983-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.988-0400 m30999| 2015-07-09T14:14:26.984-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.988-0400 m30998| 2015-07-09T14:14:26.986-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:26.988-0400 m31102| 2015-07-09T14:14:26.988-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.002-0400 m30999| 2015-07-09T14:14:27.001-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.006-0400 m30998| 2015-07-09T14:14:27.006-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.007-0400 m30999| 2015-07-09T14:14:27.006-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba02ca4787b9985d1da8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.007-0400 m30998| 2015-07-09T14:14:27.006-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.028-0400 m30998| 2015-07-09T14:14:27.028-0400 I COMMAND [conn300] DROP DATABASE: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.028-0400 m30998| 2015-07-09T14:14:27.028-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.029-0400 m30998| 2015-07-09T14:14:27.028-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.028-0400-559eba030bd550bed3408adf", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667028), what: "dropDatabase.start", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.036-0400 m30999| 2015-07-09T14:14:27.035-0400 I COMMAND [conn299] DROP DATABASE: drop_database4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.036-0400 m30999| 2015-07-09T14:14:27.035-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.037-0400 m30999| 2015-07-09T14:14:27.035-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.035-0400-559eba03ca4787b9985d1da9", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465667035), what: "dropDatabase.start", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.039-0400 m30999| 2015-07-09T14:14:27.038-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.040-0400 m30999| 2015-07-09T14:14:27.040-0400 I COMMAND [conn300] DROP DATABASE: drop_database8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.041-0400 m30999| 2015-07-09T14:14:27.040-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.041-0400 m30999| 2015-07-09T14:14:27.040-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.040-0400-559eba03ca4787b9985d1daa", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465667040), what: "dropDatabase.start", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.055-0400 m30998| 2015-07-09T14:14:27.054-0400 I COMMAND [conn301] DROP DATABASE: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.055-0400 m30998| 2015-07-09T14:14:27.054-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.056-0400 m30998| 2015-07-09T14:14:27.054-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.054-0400-559eba030bd550bed3408ae0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465667054), what: "dropDatabase.start", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.057-0400 m30999| 2015-07-09T14:14:27.055-0400 I COMMAND [conn301] DROP DATABASE: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.057-0400 m30999| 2015-07-09T14:14:27.055-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.057-0400 m30999| 2015-07-09T14:14:27.055-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.055-0400-559eba03ca4787b9985d1dab", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", 
time: new Date(1436465667055), what: "dropDatabase.start", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.076-0400 m31100| 2015-07-09T14:14:27.075-0400 I COMMAND [conn158] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.077-0400 m30998| 2015-07-09T14:14:27.076-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.078-0400 m30998| 2015-07-09T14:14:27.078-0400 I COMMAND [conn299] DROP DATABASE: drop_database3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.079-0400 m30998| 2015-07-09T14:14:27.078-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.079-0400 m30998| 2015-07-09T14:14:27.078-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.078-0400-559eba030bd550bed3408ae1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465667078), what: "dropDatabase.start", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.081-0400 m31100| 2015-07-09T14:14:27.079-0400 I COMMAND [conn158] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.083-0400 m30998| 2015-07-09T14:14:27.080-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.080-0400-559eba030bd550bed3408ae2", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465667080), what: "dropDatabase", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.084-0400 m30999| 2015-07-09T14:14:27.084-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.113-0400 m30998| 2015-07-09T14:14:27.112-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.113-0400 m31100| 2015-07-09T14:14:27.112-0400 I COMMAND [conn158] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.115-0400 m30998| 2015-07-09T14:14:27.112-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.115-0400 m31100| 2015-07-09T14:14:27.114-0400 I COMMAND [conn158] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.116-0400 m30998| 2015-07-09T14:14:27.115-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.115-0400-559eba030bd550bed3408ae3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667115), what: "dropDatabase", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.120-0400 m30999| 2015-07-09T14:14:27.118-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.121-0400 m30999| 2015-07-09T14:14:27.120-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.121-0400 m30999| 2015-07-09T14:14:27.121-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.126-0400 m30998| 2015-07-09T14:14:27.125-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.127-0400 m30999| 2015-07-09T14:14:27.127-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.128-0400 m30999| 2015-07-09T14:14:27.127-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.139-0400 m30998| 2015-07-09T14:14:27.138-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408ae5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.143-0400 m30998| 2015-07-09T14:14:27.143-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.145-0400 m30998| 2015-07-09T14:14:27.144-0400 I COMMAND [conn297] DROP DATABASE: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.145-0400 m30998| 2015-07-09T14:14:27.144-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.146-0400 m30998| 2015-07-09T14:14:27.144-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.144-0400-559eba030bd550bed3408ae6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465667144), what: "dropDatabase.start", ns: "drop_database9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.146-0400 m30998| 2015-07-09T14:14:27.144-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408ae4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.162-0400 m31100| 2015-07-09T14:14:27.161-0400 I COMMAND [conn159] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.164-0400 m31102| 2015-07-09T14:14:27.164-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.165-0400 m30999| 2015-07-09T14:14:27.164-0400 I COMMAND [conn297] DROP DATABASE: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.165-0400 m30999| 2015-07-09T14:14:27.164-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.165-0400 m30999| 2015-07-09T14:14:27.164-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.164-0400-559eba03ca4787b9985d1dac", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465667164), what: "dropDatabase.start", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.166-0400 m31100| 2015-07-09T14:14:27.166-0400 I COMMAND [conn159] dropDatabase drop_database7 finished 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.166-0400 m31100| 2015-07-09T14:14:27.166-0400 I COMMAND [conn157] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.167-0400 m31102| 2015-07-09T14:14:27.166-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.167-0400 m30998| 2015-07-09T14:14:27.166-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.166-0400-559eba030bd550bed3408ae7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465667166), what: "dropDatabase", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.167-0400 m31101| 2015-07-09T14:14:27.167-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.168-0400 m31102| 2015-07-09T14:14:27.168-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.168-0400 m31101| 2015-07-09T14:14:27.168-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.169-0400 m31101| 2015-07-09T14:14:27.169-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.170-0400 m31100| 2015-07-09T14:14:27.169-0400 I COMMAND [conn157] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.170-0400 m31100| 2015-07-09T14:14:27.170-0400 I COMMAND [conn158] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.170-0400 m30999| 2015-07-09T14:14:27.170-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.170-0400-559eba03ca4787b9985d1dad", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465667170), what: "dropDatabase", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.170-0400 m31102| 2015-07-09T14:14:27.170-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.171-0400 m31100| 2015-07-09T14:14:27.171-0400 I COMMAND [conn158] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.171-0400 m31101| 2015-07-09T14:14:27.171-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.172-0400 m31100| 2015-07-09T14:14:27.171-0400 I COMMAND [conn156] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.172-0400 m30998| 2015-07-09T14:14:27.172-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.172-0400-559eba030bd550bed3408ae8", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465667172), what: "dropDatabase", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.173-0400 m31100| 2015-07-09T14:14:27.173-0400 I COMMAND [conn156] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.173-0400 m31100| 2015-07-09T14:14:27.173-0400 I COMMAND [conn28] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.173-0400 m30999| 
2015-07-09T14:14:27.173-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.173-0400-559eba03ca4787b9985d1dae", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465667173), what: "dropDatabase", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.174-0400 m31100| 2015-07-09T14:14:27.174-0400 I COMMAND [conn28] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.175-0400 m30999| 2015-07-09T14:14:27.174-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.174-0400-559eba03ca4787b9985d1daf", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465667174), what: "dropDatabase", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.197-0400 m30998| 2015-07-09T14:14:27.197-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.198-0400 m30999| 2015-07-09T14:14:27.197-0400 I COMMAND [conn298] DROP DATABASE: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.198-0400 m30999| 2015-07-09T14:14:27.197-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.198-0400 m30998| 2015-07-09T14:14:27.197-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.200-0400 m30999| 2015-07-09T14:14:27.197-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.197-0400-559eba03ca4787b9985d1db0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465667197), what: "dropDatabase.start", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.212-0400 m31101| 2015-07-09T14:14:27.212-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.214-0400 m31101| 2015-07-09T14:14:27.214-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.215-0400 m31101| 2015-07-09T14:14:27.215-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.220-0400 m31101| 2015-07-09T14:14:27.219-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.221-0400 m31102| 2015-07-09T14:14:27.220-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.222-0400 m31101| 2015-07-09T14:14:27.221-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.223-0400 m30999| 2015-07-09T14:14:27.222-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1db1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.225-0400 m30998| 2015-07-09T14:14:27.223-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.225-0400 m31101| 2015-07-09T14:14:27.224-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:27.226-0400 m31100| 2015-07-09T14:14:27.224-0400 I COMMAND [conn158] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.226-0400 m31102| 2015-07-09T14:14:27.224-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.227-0400 m31101| 2015-07-09T14:14:27.225-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.227-0400 m30999| 2015-07-09T14:14:27.227-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.228-0400 m31102| 2015-07-09T14:14:27.227-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.228-0400 m30998| 2015-07-09T14:14:27.228-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.230-0400 m31100| 2015-07-09T14:14:27.229-0400 I COMMAND [conn158] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.231-0400 m31102| 2015-07-09T14:14:27.229-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.231-0400 m31101| 2015-07-09T14:14:27.229-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.231-0400 m30998| 2015-07-09T14:14:27.230-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.230-0400-559eba030bd550bed3408aeb", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465667230), what: "dropDatabase", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.233-0400 m31102| 2015-07-09T14:14:27.233-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.234-0400 m31101| 2015-07-09T14:14:27.231-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.239-0400 m30999| 2015-07-09T14:14:27.238-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1db2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.241-0400 m31101| 2015-07-09T14:14:27.240-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.242-0400 m31102| 2015-07-09T14:14:27.241-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.242-0400 m31102| 2015-07-09T14:14:27.241-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.244-0400 m30999| 2015-07-09T14:14:27.243-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.246-0400 m31102| 2015-07-09T14:14:27.244-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.247-0400 m30999| 2015-07-09T14:14:27.245-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.248-0400 m30998| 2015-07-09T14:14:27.245-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.248-0400 m30998| 2015-07-09T14:14:27.246-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408aea
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.248-0400 m31102| 2015-07-09T14:14:27.246-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.248-0400 m30998| 2015-07-09T14:14:27.247-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408ae9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.249-0400 m30999| 2015-07-09T14:14:27.249-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.252-0400 m31102| 2015-07-09T14:14:27.251-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.252-0400 m30999| 2015-07-09T14:14:27.251-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1db3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.258-0400 m30998| 2015-07-09T14:14:27.257-0400 I COMMAND [conn298] DROP DATABASE: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.258-0400 m30998| 2015-07-09T14:14:27.257-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408aec
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.259-0400 m30998| 2015-07-09T14:14:27.257-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.259-0400 m30998| 2015-07-09T14:14:27.257-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.257-0400-559eba030bd550bed3408aed", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465667257), what: "dropDatabase.start", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.266-0400 m31101| 2015-07-09T14:14:27.265-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.267-0400 m31101| 2015-07-09T14:14:27.267-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.269-0400 m31102| 2015-07-09T14:14:27.269-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.272-0400 m30998| 2015-07-09T14:14:27.272-0400 I COMMAND [conn300] DROP DATABASE: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.272-0400 m30998| 2015-07-09T14:14:27.272-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.273-0400 m30998| 2015-07-09T14:14:27.272-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.272-0400-559eba030bd550bed3408aee", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667272), what: "dropDatabase.start", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.276-0400 m31102| 2015-07-09T14:14:27.276-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.290-0400 m31100| 2015-07-09T14:14:27.290-0400 I COMMAND [conn156] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.292-0400 m30999| 2015-07-09T14:14:27.290-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.293-0400 m30999| 2015-07-09T14:14:27.293-0400 I COMMAND [conn301] DROP DATABASE: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.294-0400 m30999| 2015-07-09T14:14:27.293-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.294-0400 m30999| 2015-07-09T14:14:27.293-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.293-0400-559eba03ca4787b9985d1db4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465667293), what: "dropDatabase.start", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.296-0400 m30998| 2015-07-09T14:14:27.295-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.296-0400 m30998| 2015-07-09T14:14:27.295-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.297-0400 m31100| 2015-07-09T14:14:27.297-0400 I COMMAND [conn156] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.298-0400 m31100| 2015-07-09T14:14:27.297-0400 I COMMAND [conn28] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.298-0400 m30999| 2015-07-09T14:14:27.297-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.297-0400-559eba03ca4787b9985d1db5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465667297), what: "dropDatabase", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.301-0400 m31100| 2015-07-09T14:14:27.301-0400 I COMMAND [conn28] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.303-0400 m30999| 2015-07-09T14:14:27.302-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.302-0400-559eba03ca4787b9985d1db6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465667302), what: "dropDatabase", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.305-0400 m30999| 2015-07-09T14:14:27.304-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.306-0400 m30998| 2015-07-09T14:14:27.305-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.324-0400 m31101| 2015-07-09T14:14:27.323-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.327-0400 m30999| 2015-07-09T14:14:27.326-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.330-0400 m30998| 2015-07-09T14:14:27.329-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.331-0400 m30999| 2015-07-09T14:14:27.326-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.333-0400 m31101| 2015-07-09T14:14:27.332-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.336-0400 m31101| 2015-07-09T14:14:27.335-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.336-0400 m31102| 2015-07-09T14:14:27.335-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.336-0400 m31102| 2015-07-09T14:14:27.336-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.338-0400 m31102| 2015-07-09T14:14:27.337-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.340-0400 m30999| 2015-07-09T14:14:27.337-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.341-0400 m30998| 2015-07-09T14:14:27.337-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.341-0400 m30998| 2015-07-09T14:14:27.338-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.341-0400 m31101| 2015-07-09T14:14:27.341-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.344-0400 m31102| 2015-07-09T14:14:27.342-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.347-0400 m30999| 2015-07-09T14:14:27.346-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1db7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.348-0400 m30999| 2015-07-09T14:14:27.346-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1db8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.348-0400 m30998| 2015-07-09T14:14:27.347-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.348-0400 m30998| 2015-07-09T14:14:27.347-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.363-0400 m30999| 2015-07-09T14:14:27.363-0400 I COMMAND [conn300] DROP DATABASE: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.363-0400 m30999| 2015-07-09T14:14:27.363-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.364-0400 m30999| 2015-07-09T14:14:27.363-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.363-0400-559eba03ca4787b9985d1db9", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465667363), what: "dropDatabase.start", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.368-0400 m30998| 2015-07-09T14:14:27.367-0400 I COMMAND [conn301] DROP DATABASE: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.368-0400 m30998| 2015-07-09T14:14:27.368-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.368-0400 m30998| 2015-07-09T14:14:27.368-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.368-0400-559eba030bd550bed3408aef", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465667368), what: "dropDatabase.start", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.370-0400 m31100| 2015-07-09T14:14:27.369-0400 I COMMAND [conn159] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.373-0400 m31100| 2015-07-09T14:14:27.372-0400 I COMMAND [conn159] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.373-0400 m30999| 2015-07-09T14:14:27.372-0400 I COMMAND [conn299] DROP DATABASE: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.373-0400 m30999| 2015-07-09T14:14:27.373-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.373-0400 m30999| 2015-07-09T14:14:27.373-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.373-0400-559eba03ca4787b9985d1dba", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465667373), what: "dropDatabase.start", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.374-0400 m31100| 2015-07-09T14:14:27.373-0400 I COMMAND [conn158] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.374-0400 m30998| 2015-07-09T14:14:27.373-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.373-0400-559eba030bd550bed3408af0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667373), what: "dropDatabase", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.374-0400 m31100| 2015-07-09T14:14:27.374-0400 I COMMAND [conn158] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.374-0400 m31100| 2015-07-09T14:14:27.374-0400 I COMMAND [conn28] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.375-0400 m30998| 2015-07-09T14:14:27.374-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.374-0400-559eba030bd550bed3408af1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465667374), what: "dropDatabase", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.375-0400 m31100| 2015-07-09T14:14:27.375-0400 I COMMAND [conn28] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.376-0400 m30999| 2015-07-09T14:14:27.376-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.376-0400-559eba03ca4787b9985d1dbb", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465667376), what: "dropDatabase", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.402-0400 m30998| 2015-07-09T14:14:27.402-0400 I COMMAND [conn297] DROP DATABASE: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.403-0400 m30998| 2015-07-09T14:14:27.402-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.403-0400 m30998| 2015-07-09T14:14:27.402-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.402-0400-559eba030bd550bed3408af2", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465667402), what: "dropDatabase.start", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.419-0400 m30999| 2015-07-09T14:14:27.418-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.421-0400 m30999| 2015-07-09T14:14:27.418-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.421-0400 m30998| 2015-07-09T14:14:27.419-0400 I COMMAND [conn299] DROP DATABASE: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.421-0400 m30998| 2015-07-09T14:14:27.419-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.422-0400 m30998| 2015-07-09T14:14:27.419-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.419-0400-559eba030bd550bed3408af3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465667419), what: "dropDatabase.start", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.422-0400 m29000| 2015-07-09T14:14:27.419-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63671 #71 (71 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.426-0400 m30998| 2015-07-09T14:14:27.425-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.426-0400 m31100| 2015-07-09T14:14:27.425-0400 I COMMAND [conn158] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.427-0400 m31100| 2015-07-09T14:14:27.427-0400 I COMMAND [conn158] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.428-0400 m30998| 2015-07-09T14:14:27.428-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.428-0400-559eba030bd550bed3408af4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465667428), what: "dropDatabase", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.441-0400 m30999| 2015-07-09T14:14:27.440-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.441-0400 m31100| 2015-07-09T14:14:27.441-0400 I COMMAND [conn28] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.442-0400 m31101| 2015-07-09T14:14:27.442-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.444-0400 m31102| 2015-07-09T14:14:27.443-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.445-0400 m31100| 2015-07-09T14:14:27.445-0400 I COMMAND [conn28] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.445-0400 m31101| 2015-07-09T14:14:27.445-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.448-0400 m30999| 2015-07-09T14:14:27.446-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.446-0400-559eba03ca4787b9985d1dbd", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465667446), what: "dropDatabase", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.448-0400 m31100| 2015-07-09T14:14:27.446-0400 I COMMAND [conn158] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.449-0400 m30999| 2015-07-09T14:14:27.447-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.451-0400 m30998| 2015-07-09T14:14:27.446-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.451-0400 m31101| 2015-07-09T14:14:27.449-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.452-0400 m31102| 2015-07-09T14:14:27.449-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.452-0400 m31100| 2015-07-09T14:14:27.450-0400 I COMMAND [conn158] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.452-0400 m30999| 2015-07-09T14:14:27.450-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.452-0400 m31100| 2015-07-09T14:14:27.450-0400 I COMMAND [conn28] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.453-0400 m30999| 2015-07-09T14:14:27.451-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.453-0400 m30998| 2015-07-09T14:14:27.451-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.451-0400-559eba030bd550bed3408af8", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465667451), what: "dropDatabase", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.453-0400 m31102| 2015-07-09T14:14:27.451-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.454-0400 m31101| 2015-07-09T14:14:27.454-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.459-0400 m31101| 2015-07-09T14:14:27.455-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.459-0400 m31102| 2015-07-09T14:14:27.455-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.459-0400 m31100| 2015-07-09T14:14:27.455-0400 I COMMAND [conn28] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.459-0400 m30998| 2015-07-09T14:14:27.456-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408af7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.459-0400 m30998| 2015-07-09T14:14:27.456-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408af5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.460-0400 m30998| 2015-07-09T14:14:27.456-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408af6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.460-0400 m30999| 2015-07-09T14:14:27.457-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.457-0400-559eba03ca4787b9985d1dbf", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465667457), what: "dropDatabase", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.461-0400 m31102| 2015-07-09T14:14:27.457-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.462-0400 m31102| 2015-07-09T14:14:27.460-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.462-0400 m30998| 2015-07-09T14:14:27.460-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.463-0400 m30999| 2015-07-09T14:14:27.462-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dbe
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.465-0400 m31101| 2015-07-09T14:14:27.464-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.469-0400 m30999| 2015-07-09T14:14:27.468-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dbc
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.471-0400 m30998| 2015-07-09T14:14:27.470-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408af9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.473-0400 m30999| 2015-07-09T14:14:27.472-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dc0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.493-0400 m30999| 2015-07-09T14:14:27.493-0400 I COMMAND [conn297] DROP DATABASE: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.493-0400 m30999| 2015-07-09T14:14:27.493-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.494-0400 m30999| 2015-07-09T14:14:27.493-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.493-0400-559eba03ca4787b9985d1dc1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465667493), what: "dropDatabase.start", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.503-0400 m31101| 2015-07-09T14:14:27.503-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.506-0400 m30998| 2015-07-09T14:14:27.506-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.508-0400 m31100| 2015-07-09T14:14:27.507-0400 I COMMAND [conn158] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.510-0400 m30998| 2015-07-09T14:14:27.509-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.510-0400 m31101| 2015-07-09T14:14:27.510-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.511-0400 m30998| 2015-07-09T14:14:27.510-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.512-0400 m30999| 2015-07-09T14:14:27.511-0400 I COMMAND [conn298] DROP DATABASE: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.512-0400 m30999| 2015-07-09T14:14:27.511-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.512-0400 m30999| 2015-07-09T14:14:27.511-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.514-0400 m30999| 2015-07-09T14:14:27.511-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.511-0400-559eba03ca4787b9985d1dc2", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465667511), what: "dropDatabase.start", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.514-0400 m31101| 2015-07-09T14:14:27.512-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.515-0400 m31102| 2015-07-09T14:14:27.512-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.515-0400 m31100| 2015-07-09T14:14:27.513-0400 I COMMAND [conn158] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.515-0400 m30998| 2015-07-09T14:14:27.513-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.513-0400-559eba030bd550bed3408afa", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465667513), what: "dropDatabase", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.516-0400 m31102| 2015-07-09T14:14:27.516-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.517-0400 m30999| 2015-07-09T14:14:27.516-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.518-0400 m30998| 2015-07-09T14:14:27.517-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.518-0400 m31102| 2015-07-09T14:14:27.517-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.518-0400 m30999| 2015-07-09T14:14:27.518-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.520-0400 m31102| 2015-07-09T14:14:27.519-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.520-0400 m31101| 2015-07-09T14:14:27.519-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.521-0400 m31101| 2015-07-09T14:14:27.520-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.522-0400 m31102| 2015-07-09T14:14:27.520-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.522-0400 m31101| 2015-07-09T14:14:27.521-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.522-0400 m31101| 2015-07-09T14:14:27.522-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.523-0400 m31102| 2015-07-09T14:14:27.522-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.523-0400 m31102| 2015-07-09T14:14:27.523-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.523-0400 m31101| 2015-07-09T14:14:27.523-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.525-0400 m31102| 2015-07-09T14:14:27.524-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.559-0400 m31101| 2015-07-09T14:14:27.558-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.560-0400 m31102| 2015-07-09T14:14:27.559-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.560-0400 m30999| 2015-07-09T14:14:27.560-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.561-0400 m30999| 2015-07-09T14:14:27.560-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.561-0400 m31100| 2015-07-09T14:14:27.560-0400 I COMMAND [conn28] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.562-0400 m29000| 2015-07-09T14:14:27.562-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63672 #72 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.563-0400 m31102| 2015-07-09T14:14:27.563-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.563-0400 m31100| 2015-07-09T14:14:27.563-0400 I COMMAND [conn28] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.564-0400 m31101| 2015-07-09T14:14:27.563-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.565-0400 m30998| 2015-07-09T14:14:27.564-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.565-0400 m30999| 2015-07-09T14:14:27.564-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.566-0400 m30999| 2015-07-09T14:14:27.564-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.566-0400 m30999| 2015-07-09T14:14:27.564-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.564-0400-559eba03ca4787b9985d1dc3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465667564), what: "dropDatabase", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.566-0400 m31102| 2015-07-09T14:14:27.564-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.567-0400 m30998| 2015-07-09T14:14:27.564-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.567-0400 m30999| 2015-07-09T14:14:27.564-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.567-0400 m30998| 2015-07-09T14:14:27.566-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.567-0400 m30998| 2015-07-09T14:14:27.566-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.568-0400 m31102| 2015-07-09T14:14:27.568-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.570-0400 m30998| 2015-07-09T14:14:27.569-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408afb
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.576-0400 m30999| 2015-07-09T14:14:27.574-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dc4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.577-0400 m31101| 2015-07-09T14:14:27.576-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.577-0400 m31101| 2015-07-09T14:14:27.577-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.586-0400 m30999| 2015-07-09T14:14:27.586-0400 I COMMAND [conn301] DROP DATABASE: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.586-0400 m30999| 2015-07-09T14:14:27.586-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.587-0400 m30999| 2015-07-09T14:14:27.586-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.586-0400-559eba03ca4787b9985d1dc5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465667586), what: "dropDatabase.start", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.638-0400 m30998| 2015-07-09T14:14:27.638-0400 I COMMAND [conn301] DROP DATABASE: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.638-0400 m30999| 2015-07-09T14:14:27.638-0400 I COMMAND [conn299] DROP DATABASE: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.639-0400 m30999| 2015-07-09T14:14:27.638-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.639-0400 m30999| 2015-07-09T14:14:27.638-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.638-0400-559eba03ca4787b9985d1dc6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465667638), what: "dropDatabase.start", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.639-0400 m30998| 2015-07-09T14:14:27.638-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.639-0400 m30998| 2015-07-09T14:14:27.638-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.638-0400-559eba030bd550bed3408afc", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465667638), what: "dropDatabase.start", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.644-0400 m30998| 2015-07-09T14:14:27.644-0400 I COMMAND [conn297] DROP DATABASE: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.644-0400 m30998| 2015-07-09T14:14:27.644-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.645-0400 m30998| 2015-07-09T14:14:27.644-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.644-0400-559eba030bd550bed3408afd", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465667644), what: "dropDatabase.start", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.656-0400 m30998| 2015-07-09T14:14:27.655-0400 I COMMAND [conn298] DROP DATABASE: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.657-0400 m30998| 2015-07-09T14:14:27.655-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.657-0400 m30998| 2015-07-09T14:14:27.655-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.655-0400-559eba030bd550bed3408afe", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465667655), what: "dropDatabase.start", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.657-0400 m30999| 2015-07-09T14:14:27.655-0400 I COMMAND [conn300] DROP DATABASE: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.657-0400 m30999| 2015-07-09T14:14:27.655-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.658-0400 m30999| 2015-07-09T14:14:27.655-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.655-0400-559eba03ca4787b9985d1dc7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465667655), what: "dropDatabase.start", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.658-0400 m31100| 2015-07-09T14:14:27.658-0400 I COMMAND [conn28] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.659-0400 m30999| 2015-07-09T14:14:27.659-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.659-0400 m30998| 2015-07-09T14:14:27.659-0400 I COMMAND [conn300] DROP DATABASE: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.660-0400 m30998| 2015-07-09T14:14:27.659-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.660-0400 m30998| 2015-07-09T14:14:27.659-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.660-0400 m30998| 2015-07-09T14:14:27.659-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.659-0400-559eba030bd550bed3408aff", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667659), what: "dropDatabase.start", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.662-0400 m31100| 2015-07-09T14:14:27.661-0400 I COMMAND [conn28] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.662-0400 m30999| 2015-07-09T14:14:27.661-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.661-0400-559eba03ca4787b9985d1dc8", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465667661), what: "dropDatabase", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.704-0400 m30999| 2015-07-09T14:14:27.703-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.704-0400 m31100| 2015-07-09T14:14:27.704-0400 I COMMAND [conn28] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.715-0400 m31100| 2015-07-09T14:14:27.714-0400 I COMMAND [conn28] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.718-0400 m30999| 2015-07-09T14:14:27.716-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.716-0400-559eba03ca4787b9985d1dc9", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465667716), what: "dropDatabase", ns: "drop_database2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.728-0400 m30999| 2015-07-09T14:14:27.727-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.738-0400 m31100| 2015-07-09T14:14:27.737-0400 I COMMAND [conn28] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.739-0400 m30998| 2015-07-09T14:14:27.738-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.740-0400 m30998| 2015-07-09T14:14:27.738-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.740-0400 m30999| 2015-07-09T14:14:27.739-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.741-0400 m31100| 2015-07-09T14:14:27.741-0400 I COMMAND [conn28] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.743-0400 m31100| 2015-07-09T14:14:27.743-0400 I COMMAND [conn159] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.743-0400 m30999| 2015-07-09T14:14:27.743-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.742-0400-559eba03ca4787b9985d1dcc", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465667742), what: "dropDatabase", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.748-0400 m30999| 2015-07-09T14:14:27.746-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.748-0400 m30998| 2015-07-09T14:14:27.746-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.752-0400 m30998| 2015-07-09T14:14:27.751-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.753-0400 m31100| 2015-07-09T14:14:27.751-0400 I COMMAND [conn159] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.753-0400 m31100| 2015-07-09T14:14:27.752-0400 I COMMAND [conn161] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.754-0400 m30999| 2015-07-09T14:14:27.752-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dcb
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.754-0400 m30998| 2015-07-09T14:14:27.752-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.752-0400-559eba030bd550bed3408b00", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465667752), what: "dropDatabase", ns: "drop_database9", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.763-0400 m30999| 2015-07-09T14:14:27.761-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dca
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.771-0400 m31100| 2015-07-09T14:14:27.769-0400 I COMMAND [conn161] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.771-0400 m30998| 2015-07-09T14:14:27.769-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.771-0400 m31100| 2015-07-09T14:14:27.770-0400 I COMMAND [conn159] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.772-0400 m30998| 2015-07-09T14:14:27.770-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.770-0400-559eba030bd550bed3408b02", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465667770), what: "dropDatabase", ns: "drop_database7", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.772-0400 m30998| 2015-07-09T14:14:27.771-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408b01
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.781-0400 m31100| 2015-07-09T14:14:27.780-0400 I COMMAND [conn159] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.782-0400 m30999| 2015-07-09T14:14:27.781-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dcd
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.782-0400 m31100| 2015-07-09T14:14:27.781-0400 I COMMAND [conn28] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.783-0400 m30998| 2015-07-09T14:14:27.782-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.782-0400-559eba030bd550bed3408b03", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465667782), what: "dropDatabase", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.784-0400 m31100| 2015-07-09T14:14:27.783-0400 I COMMAND [conn28] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.784-0400 m31100| 2015-07-09T14:14:27.783-0400 I COMMAND [conn158] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.784-0400 m30999| 2015-07-09T14:14:27.784-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.784-0400-559eba03ca4787b9985d1dce", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465667784), what: "dropDatabase", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.786-0400 m31100| 2015-07-09T14:14:27.785-0400 I COMMAND [conn158] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.788-0400 m30998| 2015-07-09T14:14:27.786-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.786-0400-559eba030bd550bed3408b04", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667786), what: "dropDatabase", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.789-0400 m31102| 2015-07-09T14:14:27.787-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.792-0400 m31101| 2015-07-09T14:14:27.791-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database6 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.795-0400 m31102| 2015-07-09T14:14:27.795-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.796-0400 m31102| 2015-07-09T14:14:27.796-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.796-0400 m31101| 2015-07-09T14:14:27.796-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database6 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.797-0400 m30998| 2015-07-09T14:14:27.797-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408b06
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.797-0400 m30998| 2015-07-09T14:14:27.797-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408b05
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.798-0400 m31102| 2015-07-09T14:14:27.798-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.799-0400 m31101| 2015-07-09T14:14:27.798-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.799-0400 m31102| 2015-07-09T14:14:27.799-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.800-0400 m30999| 2015-07-09T14:14:27.799-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dcf
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.800-0400 m30998| 2015-07-09T14:14:27.799-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408b07
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.801-0400 m31101| 2015-07-09T14:14:27.800-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.801-0400 m31101| 2015-07-09T14:14:27.801-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.801-0400 m31102| 2015-07-09T14:14:27.801-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.802-0400 m31102| 2015-07-09T14:14:27.802-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.804-0400 m31102| 2015-07-09T14:14:27.803-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.805-0400 m31102| 2015-07-09T14:14:27.805-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.806-0400 m31101| 2015-07-09T14:14:27.805-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.806-0400 m31102| 2015-07-09T14:14:27.806-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.807-0400 m31102| 2015-07-09T14:14:27.806-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.809-0400 m31102| 2015-07-09T14:14:27.807-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.810-0400 m31102| 2015-07-09T14:14:27.808-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.810-0400 m31102| 2015-07-09T14:14:27.809-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.812-0400 m31101| 2015-07-09T14:14:27.811-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database9 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.813-0400 m31102| 2015-07-09T14:14:27.812-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.813-0400 m30999| 2015-07-09T14:14:27.812-0400 I COMMAND [conn298] DROP DATABASE: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.813-0400 m30999| 2015-07-09T14:14:27.812-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.813-0400 m30999| 2015-07-09T14:14:27.812-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.812-0400-559eba03ca4787b9985d1dd0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465667812), what: "dropDatabase.start", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.815-0400 m31101| 2015-07-09T14:14:27.814-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database9 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.816-0400 m31102| 2015-07-09T14:14:27.816-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.817-0400 m31101| 2015-07-09T14:14:27.816-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database7 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.817-0400 m30998| 2015-07-09T14:14:27.817-0400 I COMMAND [conn299] DROP DATABASE: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.817-0400 m30998| 2015-07-09T14:14:27.817-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.818-0400 m30998| 2015-07-09T14:14:27.817-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.817-0400-559eba030bd550bed3408b08", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465667817), what: "dropDatabase.start", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.819-0400 m31101| 2015-07-09T14:14:27.818-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database7 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.819-0400 m31101| 2015-07-09T14:14:27.819-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.820-0400 m30999| 2015-07-09T14:14:27.820-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.824-0400 m30999| 2015-07-09T14:14:27.822-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.824-0400 m30998| 2015-07-09T14:14:27.823-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.825-0400 m30999| 2015-07-09T14:14:27.823-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.825-0400 m30998| 2015-07-09T14:14:27.823-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.825-0400 m30998| 2015-07-09T14:14:27.824-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.825-0400 m30999| 2015-07-09T14:14:27.824-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.825-0400 m30998| 2015-07-09T14:14:27.825-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.825-0400 m31101| 2015-07-09T14:14:27.825-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.826-0400 m31101| 2015-07-09T14:14:27.826-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database8 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.828-0400 m31101| 2015-07-09T14:14:27.828-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database8 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.829-0400 m31101| 2015-07-09T14:14:27.829-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database5 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.831-0400 m31101| 2015-07-09T14:14:27.830-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database5 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.878-0400 m30998| 2015-07-09T14:14:27.876-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.878-0400 m31100| 2015-07-09T14:14:27.876-0400 I COMMAND [conn158] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.881-0400 m30999| 2015-07-09T14:14:27.877-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.881-0400 m31100| 2015-07-09T14:14:27.878-0400 I COMMAND [conn158] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.881-0400 m30999| 2015-07-09T14:14:27.879-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.882-0400 m30998| 2015-07-09T14:14:27.880-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.885-0400 m30998| 2015-07-09T14:14:27.880-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.880-0400-559eba030bd550bed3408b09", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465667880), what: "dropDatabase", ns: "drop_database3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.886-0400 m31100| 2015-07-09T14:14:27.880-0400 I COMMAND [conn28] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.886-0400 m31101| 2015-07-09T14:14:27.881-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.886-0400 m30998| 2015-07-09T14:14:27.881-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.886-0400 m30999| 2015-07-09T14:14:27.882-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.886-0400 m31102| 2015-07-09T14:14:27.882-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.886-0400 m30999| 2015-07-09T14:14:27.882-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.886-0400 m30998| 2015-07-09T14:14:27.883-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.887-0400 m30998| 2015-07-09T14:14:27.883-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.887-0400 m31102| 2015-07-09T14:14:27.884-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.887-0400 m30999| 2015-07-09T14:14:27.884-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.887-0400 m31101| 2015-07-09T14:14:27.887-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.888-0400 m31100| 2015-07-09T14:14:27.887-0400 I COMMAND [conn28] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.888-0400 m30998| 2015-07-09T14:14:27.888-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba030bd550bed3408b0a
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.889-0400 m30999| 2015-07-09T14:14:27.888-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.888-0400-559eba03ca4787b9985d1dd1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465667888), what: "dropDatabase", ns: "drop_database0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.889-0400 m31101| 2015-07-09T14:14:27.889-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.891-0400 m31101| 2015-07-09T14:14:27.890-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.894-0400 m31102| 2015-07-09T14:14:27.894-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.895-0400 m31102| 2015-07-09T14:14:27.895-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.931-0400 m30998| 2015-07-09T14:14:27.930-0400 I COMMAND [conn300] DROP DATABASE: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.931-0400 m30998| 2015-07-09T14:14:27.931-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.932-0400 m30998| 2015-07-09T14:14:27.931-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.931-0400-559eba030bd550bed3408b0b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667931), what: "dropDatabase.start", ns: "drop_database5", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.959-0400 m30998| 2015-07-09T14:14:27.958-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.959-0400 m30998| 2015-07-09T14:14:27.959-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.959-0400 m30999| 2015-07-09T14:14:27.959-0400 I COMMAND [conn297] DROP DATABASE: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.960-0400 m30999| 2015-07-09T14:14:27.959-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba03ca4787b9985d1dd2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.960-0400 m30999| 2015-07-09T14:14:27.959-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.960-0400 m30999| 2015-07-09T14:14:27.960-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.960-0400-559eba03ca4787b9985d1dd3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465667960), what: "dropDatabase.start", ns: "drop_database6", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.966-0400 m30998| 2015-07-09T14:14:27.965-0400 I COMMAND [conn298] DROP DATABASE: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.966-0400 m30998| 2015-07-09T14:14:27.965-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.966-0400 m30998| 2015-07-09T14:14:27.965-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.965-0400-559eba030bd550bed3408b0c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465667965), what: "dropDatabase.start", ns: "drop_database1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.976-0400 m30999| 2015-07-09T14:14:27.975-0400 I COMMAND [conn299] DROP DATABASE: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.976-0400 m30999| 2015-07-09T14:14:27.976-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.977-0400 m30999| 2015-07-09T14:14:27.976-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.976-0400-559eba03ca4787b9985d1dd4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465667976), what: "dropDatabase.start", ns: "drop_database4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.977-0400 m30999| 2015-07-09T14:14:27.977-0400 I COMMAND [conn300] DROP DATABASE: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.978-0400 m30999| 2015-07-09T14:14:27.977-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.978-0400 m30999| 2015-07-09T14:14:27.977-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.977-0400-559eba03ca4787b9985d1dd5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465667977), what: "dropDatabase.start", ns: "drop_database8", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.987-0400 m31100| 2015-07-09T14:14:27.986-0400 I COMMAND [conn46] command drop_database7.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 3266 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.989-0400 m31100| 2015-07-09T14:14:27.989-0400 I COMMAND [conn72] command drop_database9.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 2561 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 102ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.990-0400 m31100| 2015-07-09T14:14:27.989-0400 I COMMAND [conn45] command drop_database2.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1
}, timeAcquiringMicros: { w: 2567 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.990-0400 m31100| 2015-07-09T14:14:27.989-0400 I COMMAND [conn158] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.990-0400 m30998| 2015-07-09T14:14:27.989-0400 I COMMAND [conn301] DROP DATABASE: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.990-0400 m30998| 2015-07-09T14:14:27.989-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.990-0400 m30998| 2015-07-09T14:14:27.989-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.989-0400-559eba030bd550bed3408b0d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465667989), what: "dropDatabase.start", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.991-0400 m30998| 2015-07-09T14:14:27.990-0400 I COMMAND [conn297] DROP DATABASE: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.992-0400 m30998| 2015-07-09T14:14:27.990-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.992-0400 m30998| 2015-07-09T14:14:27.990-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.990-0400-559eba030bd550bed3408b0e", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465667990), what: "dropDatabase.start", ns: "drop_database9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.992-0400 m30999| 2015-07-09T14:14:27.991-0400 I COMMAND [conn301] DROP DATABASE: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.992-0400 m30999| 2015-07-09T14:14:27.991-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.992-0400 m30999| 2015-07-09T14:14:27.991-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.991-0400-559eba03ca4787b9985d1dd6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465667991), what: "dropDatabase.start", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.993-0400 m31100| 2015-07-09T14:14:27.992-0400 I COMMAND [conn158] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.994-0400 m30998| 2015-07-09T14:14:27.993-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:27.993-0400-559eba030bd550bed3408b0f", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465667993), what: "dropDatabase", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:27.998-0400 m30999| 2015-07-09T14:14:27.997-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.026-0400 m30998| 2015-07-09T14:14:28.026-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.033-0400 m30999| 2015-07-09T14:14:28.032-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.040-0400 m30999| 2015-07-09T14:14:28.039-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.057-0400 m30998| 2015-07-09T14:14:28.056-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.058-0400 m30999| 2015-07-09T14:14:28.056-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.059-0400 m31100| 2015-07-09T14:14:28.059-0400 I COMMAND [conn157] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.060-0400 m30999| 2015-07-09T14:14:28.059-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.062-0400 m30998| 2015-07-09T14:14:28.061-0400 I COMMAND [conn299] DROP DATABASE: drop_database3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.062-0400 m30998| 2015-07-09T14:14:28.061-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.063-0400 m30998| 2015-07-09T14:14:28.061-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.061-0400-559eba040bd550bed3408b11", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465668061), what: "dropDatabase.start", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.064-0400 m30999| 2015-07-09T14:14:28.062-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.068-0400 m31100| 2015-07-09T14:14:28.068-0400 I COMMAND [conn157] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.068-0400 m30998| 2015-07-09T14:14:28.068-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.068-0400 m31100| 2015-07-09T14:14:28.068-0400 I COMMAND [conn162] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.069-0400 m30999| 2015-07-09T14:14:28.068-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.068-0400-559eba04ca4787b9985d1dd7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465668068), what: "dropDatabase", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.071-0400 m30998| 2015-07-09T14:14:28.070-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.072-0400 m31100| 2015-07-09T14:14:28.072-0400 I COMMAND [conn162] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.073-0400 m30999| 2015-07-09T14:14:28.072-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.072-0400-559eba04ca4787b9985d1dd8", 
server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465668072), what: "dropDatabase", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.073-0400 m31100| 2015-07-09T14:14:28.072-0400 I COMMAND [conn159] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.075-0400 m31100| 2015-07-09T14:14:28.075-0400 I COMMAND [conn159] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.076-0400 m30998| 2015-07-09T14:14:28.075-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.075-0400-559eba040bd550bed3408b12", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465668075), what: "dropDatabase", ns: "drop_database9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.077-0400 m31100| 2015-07-09T14:14:28.076-0400 I COMMAND [conn158] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.080-0400 m30998| 2015-07-09T14:14:28.077-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.080-0400 m30998| 2015-07-09T14:14:28.078-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.083-0400 m31100| 2015-07-09T14:14:28.082-0400 I COMMAND [conn158] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.085-0400 m31100| 2015-07-09T14:14:28.085-0400 I COMMAND [conn161] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.086-0400 m30998| 2015-07-09T14:14:28.085-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.085-0400-559eba040bd550bed3408b14", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465668085), what: "dropDatabase", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.086-0400 m30998| 2015-07-09T14:14:28.085-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.091-0400 m31100| 2015-07-09T14:14:28.091-0400 I COMMAND [conn161] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.095-0400 m30999| 2015-07-09T14:14:28.092-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1dd9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.096-0400 m30999| 2015-07-09T14:14:28.093-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1dda [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.096-0400 m31100| 2015-07-09T14:14:28.094-0400 I COMMAND [conn159] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.097-0400 m30998| 2015-07-09T14:14:28.094-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.094-0400-559eba040bd550bed3408b16", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465668094), what: "dropDatabase", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.097-0400 
m31100| 2015-07-09T14:14:28.097-0400 I COMMAND [conn159] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.098-0400 m31100| 2015-07-09T14:14:28.098-0400 I COMMAND [conn156] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.099-0400 m30998| 2015-07-09T14:14:28.098-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.098-0400-559eba040bd550bed3408b17", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465668098), what: "dropDatabase", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.107-0400 m30998| 2015-07-09T14:14:28.105-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b15 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.111-0400 m31100| 2015-07-09T14:14:28.109-0400 I COMMAND [conn156] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.112-0400 m30999| 2015-07-09T14:14:28.110-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.110-0400-559eba04ca4787b9985d1ddb", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465668110), what: "dropDatabase", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.112-0400 m31100| 2015-07-09T14:14:28.109-0400 I COMMAND [conn28] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.112-0400 m30998| 2015-07-09T14:14:28.110-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b18 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.113-0400 m31102| 2015-07-09T14:14:28.111-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.114-0400 m31102| 2015-07-09T14:14:28.114-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.118-0400 m30998| 2015-07-09T14:14:28.117-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b19 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.121-0400 m31100| 2015-07-09T14:14:28.119-0400 I COMMAND [conn28] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.121-0400 m31100| 2015-07-09T14:14:28.119-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63673 #167 (110 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.121-0400 m30999| 2015-07-09T14:14:28.119-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.119-0400-559eba04ca4787b9985d1ddc", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465668119), what: "dropDatabase", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.131-0400 m30999| 2015-07-09T14:14:28.131-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1ddd [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.136-0400 m30999| 2015-07-09T14:14:28.135-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 
559eba04ca4787b9985d1dde [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.137-0400 m31102| 2015-07-09T14:14:28.137-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.139-0400 m31102| 2015-07-09T14:14:28.139-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.140-0400 m31102| 2015-07-09T14:14:28.140-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.141-0400 m31101| 2015-07-09T14:14:28.141-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.141-0400 m31102| 2015-07-09T14:14:28.141-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.142-0400 m31102| 2015-07-09T14:14:28.142-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.143-0400 m31101| 2015-07-09T14:14:28.142-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.144-0400 m31102| 2015-07-09T14:14:28.143-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.144-0400 m31102| 2015-07-09T14:14:28.144-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.147-0400 m31102| 2015-07-09T14:14:28.147-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.150-0400 m31102| 2015-07-09T14:14:28.148-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.150-0400 m31102| 2015-07-09T14:14:28.150-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.151-0400 m31102| 2015-07-09T14:14:28.151-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.153-0400 m30998| 2015-07-09T14:14:28.153-0400 I SHARDING [conn297] Placing [drop_database9] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.153-0400 m31102| 2015-07-09T14:14:28.153-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.154-0400 m30999| 2015-07-09T14:14:28.153-0400 I COMMAND [conn298] DROP DATABASE: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.154-0400 m30999| 2015-07-09T14:14:28.153-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.154-0400 m31102| 2015-07-09T14:14:28.154-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.159-0400 m30999| 2015-07-09T14:14:28.154-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.154-0400-559eba04ca4787b9985d1ddf", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465668154), what: "dropDatabase.start", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:28.159-0400 m30999| 2015-07-09T14:14:28.156-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.159-0400 m30998| 2015-07-09T14:14:28.157-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.159-0400 m30998| 2015-07-09T14:14:28.158-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.160-0400 m30998| 2015-07-09T14:14:28.158-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.160-0400 m30999| 2015-07-09T14:14:28.158-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.160-0400 m30999| 2015-07-09T14:14:28.159-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.160-0400 m30999| 2015-07-09T14:14:28.159-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.160-0400 m30998| 2015-07-09T14:14:28.159-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.160-0400 m31101| 2015-07-09T14:14:28.160-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.161-0400 m31101| 2015-07-09T14:14:28.160-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.161-0400 m31102| 2015-07-09T14:14:28.160-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.161-0400 m31101| 2015-07-09T14:14:28.161-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.161-0400 m31102| 2015-07-09T14:14:28.161-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.162-0400 m31101| 2015-07-09T14:14:28.162-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.162-0400 m31102| 2015-07-09T14:14:28.162-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.163-0400 m31101| 2015-07-09T14:14:28.163-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.165-0400 m31101| 2015-07-09T14:14:28.164-0400 I COMMAND [repl writer worker 6] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.165-0400 m31101| 2015-07-09T14:14:28.165-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.168-0400 m31101| 2015-07-09T14:14:28.167-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.168-0400 m31101| 2015-07-09T14:14:28.168-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.169-0400 m31101| 2015-07-09T14:14:28.169-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database3 finished 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.170-0400 m31101| 2015-07-09T14:14:28.169-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.171-0400 m31101| 2015-07-09T14:14:28.171-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.172-0400 m31101| 2015-07-09T14:14:28.172-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.174-0400 m31101| 2015-07-09T14:14:28.174-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.175-0400 m31101| 2015-07-09T14:14:28.175-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.176-0400 m31101| 2015-07-09T14:14:28.175-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.216-0400 m30998| 2015-07-09T14:14:28.216-0400 I SHARDING [conn297] distributed lock 'drop_database9/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.231-0400 m30998| 2015-07-09T14:14:28.227-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.231-0400 m30998| 2015-07-09T14:14:28.228-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.232-0400 m30998| 2015-07-09T14:14:28.228-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.232-0400 m30999| 2015-07-09T14:14:28.228-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.233-0400 m30999| 2015-07-09T14:14:28.228-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.234-0400 m30999| 2015-07-09T14:14:28.229-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.234-0400 m30999| 2015-07-09T14:14:28.230-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.235-0400 m30998| 2015-07-09T14:14:28.230-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.235-0400 m30999| 2015-07-09T14:14:28.230-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.235-0400 m31100| 2015-07-09T14:14:28.235-0400 I COMMAND [conn28] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.237-0400 m31100| 2015-07-09T14:14:28.236-0400 I COMMAND [conn28] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.238-0400 m30999| 2015-07-09T14:14:28.237-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.237-0400-559eba04ca4787b9985d1de0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465668237), what: "dropDatabase", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.238-0400 m30998| 2015-07-09T14:14:28.238-0400 I COMMAND [conn297] DROP DATABASE: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.238-0400 m30998| 2015-07-09T14:14:28.238-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.239-0400 m30998| 2015-07-09T14:14:28.238-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.238-0400-559eba040bd550bed3408b1a", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465668238), what: "dropDatabase.start", ns: "drop_database9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.255-0400 m31101| 2015-07-09T14:14:28.255-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.257-0400 m31101| 2015-07-09T14:14:28.257-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.265-0400 m31102| 2015-07-09T14:14:28.265-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.268-0400 m31102| 2015-07-09T14:14:28.268-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.302-0400 m30999| 2015-07-09T14:14:28.302-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1de1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.303-0400 m30998| 2015-07-09T14:14:28.302-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database9 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.308-0400 m30999| 2015-07-09T14:14:28.307-0400 I COMMAND [conn301] DROP DATABASE: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.308-0400 m30999| 2015-07-09T14:14:28.308-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.309-0400 m30999| 2015-07-09T14:14:28.308-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.308-0400-559eba04ca4787b9985d1de2", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465668308), what: "dropDatabase.start", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.309-0400 m30999| 2015-07-09T14:14:28.309-0400 I COMMAND [conn300] DROP DATABASE: drop_database8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.310-0400 m30999| 2015-07-09T14:14:28.309-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.310-0400 m30999| 2015-07-09T14:14:28.309-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.309-0400-559eba04ca4787b9985d1de3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465668309), what: "dropDatabase.start", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.313-0400 m30998| 2015-07-09T14:14:28.313-0400 I COMMAND [conn301] DROP DATABASE: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.314-0400 m30998| 2015-07-09T14:14:28.313-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.314-0400 m30998| 2015-07-09T14:14:28.313-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.313-0400-559eba040bd550bed3408b1b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465668313), what: "dropDatabase.start", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.318-0400 m30998| 2015-07-09T14:14:28.318-0400 I COMMAND [conn300] DROP DATABASE: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.319-0400 m30998| 2015-07-09T14:14:28.318-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.319-0400 m30998| 2015-07-09T14:14:28.318-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.318-0400-559eba040bd550bed3408b1c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465668318), what: "dropDatabase.start", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.327-0400 m30998| 2015-07-09T14:14:28.326-0400 I COMMAND [conn299] DROP DATABASE: drop_database3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.327-0400 m30998| 2015-07-09T14:14:28.327-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.327-0400 m30998| 2015-07-09T14:14:28.327-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.327-0400-559eba040bd550bed3408b1d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465668327), what: "dropDatabase.start", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.333-0400 m31100| 2015-07-09T14:14:28.332-0400 I COMMAND [conn46] command drop_database1.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 7097 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.336-0400 m30998| 2015-07-09T14:14:28.335-0400 I COMMAND [conn298] DROP DATABASE: drop_database1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.336-0400 m30998| 2015-07-09T14:14:28.335-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.336-0400 m30998| 2015-07-09T14:14:28.335-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.335-0400-559eba040bd550bed3408b1e", server: 
"bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465668335), what: "dropDatabase.start", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.339-0400 m31100| 2015-07-09T14:14:28.338-0400 I COMMAND [conn20] command drop_database4.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 4040 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 105ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.341-0400 m30999| 2015-07-09T14:14:28.341-0400 I COMMAND [conn299] DROP DATABASE: drop_database4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.342-0400 m30999| 2015-07-09T14:14:28.341-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.342-0400 m30999| 2015-07-09T14:14:28.341-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.341-0400-559eba04ca4787b9985d1de4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465668341), what: "dropDatabase.start", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.348-0400 m31100| 2015-07-09T14:14:28.347-0400 I COMMAND [conn60] command drop_database6.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 3283 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.349-0400 m31100| 2015-07-09T14:14:28.347-0400 I COMMAND [conn159] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.350-0400 m30999| 2015-07-09T14:14:28.350-0400 I COMMAND [conn297] DROP DATABASE: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.354-0400 m30999| 2015-07-09T14:14:28.350-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.355-0400 m30999| 2015-07-09T14:14:28.350-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.350-0400-559eba04ca4787b9985d1de5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465668350), what: "dropDatabase.start", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.355-0400 m31100| 2015-07-09T14:14:28.351-0400 I COMMAND [conn159] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.357-0400 m30998| 2015-07-09T14:14:28.353-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.353-0400-559eba040bd550bed3408b1f", server: "bs-osx108-8", clientAddr: "127.0.0.1:63629", time: new Date(1436465668353), what: "dropDatabase", ns: "drop_database9", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.357-0400 m30999| 2015-07-09T14:14:28.356-0400 I SHARDING [conn298] Placing [drop_database0] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.380-0400 m30998| 2015-07-09T14:14:28.380-0400 I NETWORK 
[conn297] end connection 127.0.0.1:63629 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.384-0400 m30999| 2015-07-09T14:14:28.383-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.385-0400 m31100| 2015-07-09T14:14:28.384-0400 I COMMAND [conn28] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.386-0400 m30998| 2015-07-09T14:14:28.385-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.387-0400 m30999| 2015-07-09T14:14:28.386-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.387-0400 m30998| 2015-07-09T14:14:28.386-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.390-0400 m30999| 2015-07-09T14:14:28.390-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.391-0400 m30999| 2015-07-09T14:14:28.390-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.392-0400 m31100| 2015-07-09T14:14:28.391-0400 I COMMAND [conn28] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.392-0400 m30998| 2015-07-09T14:14:28.391-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.392-0400 m31100| 2015-07-09T14:14:28.392-0400 I COMMAND [conn162] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.393-0400 m30999| 2015-07-09T14:14:28.392-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.392-0400-559eba04ca4787b9985d1de6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465668392), what: "dropDatabase", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.401-0400 m30998| 2015-07-09T14:14:28.400-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.401-0400 m30999| 2015-07-09T14:14:28.400-0400 I SHARDING [conn298] distributed lock 'drop_database0/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.405-0400 m31100| 2015-07-09T14:14:28.405-0400 I COMMAND [conn162] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.405-0400 m31100| 2015-07-09T14:14:28.405-0400 I COMMAND [conn163] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.406-0400 m30999| 2015-07-09T14:14:28.405-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.405-0400-559eba04ca4787b9985d1de7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465668405), what: "dropDatabase", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.413-0400 m31100| 2015-07-09T14:14:28.412-0400 I COMMAND [conn163] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.413-0400 m31100| 2015-07-09T14:14:28.412-0400 I COMMAND [conn158] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.414-0400 m30998| 2015-07-09T14:14:28.413-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.413-0400-559eba040bd550bed3408b20", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465668413), what: "dropDatabase", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.417-0400 m30999| 2015-07-09T14:14:28.416-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1de8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.417-0400 m31100| 2015-07-09T14:14:28.417-0400 I COMMAND [conn158] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.421-0400 m31100| 2015-07-09T14:14:28.418-0400 I COMMAND [conn157] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.422-0400 m30998| 2015-07-09T14:14:28.419-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.419-0400-559eba040bd550bed3408b22", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465668419), what: "dropDatabase", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.423-0400 m31100| 2015-07-09T14:14:28.421-0400 I COMMAND [conn157] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.424-0400 m30998| 2015-07-09T14:14:28.421-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b21 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.424-0400 m31100| 2015-07-09T14:14:28.422-0400 I COMMAND [conn161] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.425-0400 m30999| 2015-07-09T14:14:28.423-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.423-0400-559eba04ca4787b9985d1dea", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465668423), what: "dropDatabase", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.428-0400 m31100| 2015-07-09T14:14:28.427-0400 I COMMAND [conn161] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.429-0400 m30998| 2015-07-09T14:14:28.427-0400 I SHARDING [conn301] about to log 
metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.427-0400-559eba040bd550bed3408b23", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465668427), what: "dropDatabase", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.429-0400 m31100| 2015-07-09T14:14:28.427-0400 I COMMAND [conn159] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.430-0400 m30999| 2015-07-09T14:14:28.429-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1de9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.430-0400 m31100| 2015-07-09T14:14:28.430-0400 I COMMAND [conn159] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.431-0400 m31100| 2015-07-09T14:14:28.430-0400 I COMMAND [conn156] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.432-0400 m30998| 2015-07-09T14:14:28.431-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.431-0400-559eba040bd550bed3408b24", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465668431), what: "dropDatabase", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.435-0400 m31100| 2015-07-09T14:14:28.432-0400 I COMMAND [conn156] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.435-0400 m30999| 2015-07-09T14:14:28.433-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.433-0400-559eba04ca4787b9985d1deb", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465668433), what: "dropDatabase", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.450-0400 m30998| 2015-07-09T14:14:28.450-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b26 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.451-0400 m30999| 2015-07-09T14:14:28.450-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1dec [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.453-0400 m30998| 2015-07-09T14:14:28.451-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b25 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.457-0400 m30998| 2015-07-09T14:14:28.456-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba040bd550bed3408b27 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.460-0400 m30999| 2015-07-09T14:14:28.458-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1ded [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.465-0400 m30999| 2015-07-09T14:14:28.464-0400 I COMMAND [conn298] DROP DATABASE: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.465-0400 m30999| 2015-07-09T14:14:28.464-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.466-0400 m30999| 2015-07-09T14:14:28.464-0400 I SHARDING [conn298] about to log metadata event: { _id: 
"bs-osx108-8-2015-07-09T14:14:28.464-0400-559eba04ca4787b9985d1dee", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465668464), what: "dropDatabase.start", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.471-0400 m30999| 2015-07-09T14:14:28.468-0400 I SHARDING [conn301] Placing [drop_database2] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.472-0400 m30998| 2015-07-09T14:14:28.470-0400 I SHARDING [conn299] Placing [drop_database3] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.472-0400 m30999| 2015-07-09T14:14:28.471-0400 I SHARDING [conn299] Placing [drop_database4] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.474-0400 m30998| 2015-07-09T14:14:28.474-0400 I SHARDING [conn301] Placing [drop_database7] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.475-0400 m30999| 2015-07-09T14:14:28.475-0400 I SHARDING [conn300] Placing [drop_database8] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.475-0400 m30999| 2015-07-09T14:14:28.475-0400 I SHARDING [conn297] Placing [drop_database6] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.475-0400 m30998| 2015-07-09T14:14:28.475-0400 I SHARDING [conn300] Placing [drop_database5] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.476-0400 m30998| 2015-07-09T14:14:28.475-0400 I SHARDING [conn298] Placing [drop_database1] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.491-0400 m31102| 2015-07-09T14:14:28.490-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.493-0400 m31101| 2015-07-09T14:14:28.491-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database9 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.493-0400 m31101| 2015-07-09T14:14:28.493-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.494-0400 m31101| 2015-07-09T14:14:28.493-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.494-0400 m31102| 2015-07-09T14:14:28.493-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database9 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.495-0400 m31101| 2015-07-09T14:14:28.494-0400 I COMMAND [repl writer worker 12] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.495-0400 m31101| 2015-07-09T14:14:28.494-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.495-0400 m31102| 2015-07-09T14:14:28.494-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.496-0400 m31101| 2015-07-09T14:14:28.495-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.496-0400 m31102| 2015-07-09T14:14:28.496-0400 I COMMAND [repl writer worker 14] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.496-0400 m31101| 2015-07-09T14:14:28.496-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.497-0400 m31102| 2015-07-09T14:14:28.496-0400 I 
COMMAND [repl writer worker 0] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.497-0400 m31101| 2015-07-09T14:14:28.497-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.498-0400 m31102| 2015-07-09T14:14:28.497-0400 I COMMAND [repl writer worker 0] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.498-0400 m31101| 2015-07-09T14:14:28.497-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.498-0400 m31102| 2015-07-09T14:14:28.498-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.499-0400 m31102| 2015-07-09T14:14:28.499-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.500-0400 m31101| 2015-07-09T14:14:28.499-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.500-0400 m31101| 2015-07-09T14:14:28.499-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.501-0400 m31102| 2015-07-09T14:14:28.500-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.501-0400 m31102| 2015-07-09T14:14:28.500-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.501-0400 m31101| 2015-07-09T14:14:28.500-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.501-0400 m31102| 2015-07-09T14:14:28.501-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.502-0400 m31101| 2015-07-09T14:14:28.501-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.502-0400 m31102| 2015-07-09T14:14:28.502-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.502-0400 m31101| 2015-07-09T14:14:28.502-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.503-0400 m31102| 2015-07-09T14:14:28.503-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.503-0400 m31101| 2015-07-09T14:14:28.503-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.504-0400 m31102| 2015-07-09T14:14:28.503-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.504-0400 m31101| 2015-07-09T14:14:28.504-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.504-0400 m31102| 2015-07-09T14:14:28.504-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.504-0400 m31101| 2015-07-09T14:14:28.504-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database8 starting 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.505-0400 m31102| 2015-07-09T14:14:28.505-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.505-0400 m31102| 2015-07-09T14:14:28.505-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.506-0400 m31102| 2015-07-09T14:14:28.506-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.506-0400 m31101| 2015-07-09T14:14:28.506-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.532-0400 m30999| 2015-07-09T14:14:28.532-0400 I SHARDING [conn301] distributed lock 'drop_database2/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.533-0400 m30999| 2015-07-09T14:14:28.532-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database0 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.533-0400 m31100| 2015-07-09T14:14:28.533-0400 I COMMAND [conn156] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.534-0400 m30999| 2015-07-09T14:14:28.534-0400 I SHARDING [conn299] distributed lock 'drop_database4/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.534-0400 m30998| 2015-07-09T14:14:28.534-0400 I SHARDING [conn298] distributed lock 'drop_database1/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.538-0400 m31100| 2015-07-09T14:14:28.535-0400 I COMMAND [conn156] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.539-0400 m30999| 2015-07-09T14:14:28.535-0400 I SHARDING [conn297] distributed lock 'drop_database6/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.539-0400 m30999| 2015-07-09T14:14:28.535-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.535-0400-559eba04ca4787b9985d1def", server: "bs-osx108-8", clientAddr: "127.0.0.1:63631", time: new Date(1436465668535), what: "dropDatabase", ns: "drop_database0", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.540-0400 m31102| 2015-07-09T14:14:28.536-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.541-0400 m31101| 2015-07-09T14:14:28.538-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database0 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.541-0400 m31102| 2015-07-09T14:14:28.540-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.542-0400 m31101| 2015-07-09T14:14:28.542-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database0 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.553-0400 m30998| 2015-07-09T14:14:28.552-0400 I SHARDING [conn299] distributed lock 'drop_database3/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.553-0400 m30999| 2015-07-09T14:14:28.552-0400 I SHARDING [conn300] distributed lock 'drop_database8/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.554-0400 m30998| 2015-07-09T14:14:28.553-0400 I SHARDING [conn301] distributed lock 'drop_database7/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.556-0400 m30998| 2015-07-09T14:14:28.555-0400 I SHARDING [conn300] distributed lock 'drop_database5/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.558-0400 m30999| 2015-07-09T14:14:28.557-0400 I NETWORK [conn298] end connection 127.0.0.1:63631 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.582-0400 m30999| 2015-07-09T14:14:28.582-0400 I COMMAND [conn301] DROP DATABASE: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.583-0400 m30999| 2015-07-09T14:14:28.582-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.583-0400 m30999| 2015-07-09T14:14:28.582-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.582-0400-559eba04ca4787b9985d1df0", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465668582), what: "dropDatabase.start", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.610-0400 m30999| 2015-07-09T14:14:28.609-0400 I COMMAND [conn299] DROP DATABASE: drop_database4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.610-0400 m30999| 2015-07-09T14:14:28.609-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.611-0400 m30999| 2015-07-09T14:14:28.609-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.609-0400-559eba04ca4787b9985d1df1", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465668609), what: "dropDatabase.start", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.625-0400 m30999| 2015-07-09T14:14:28.624-0400 I COMMAND [conn297] DROP DATABASE: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.627-0400 m30999| 2015-07-09T14:14:28.625-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.627-0400 m30999| 2015-07-09T14:14:28.625-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.625-0400-559eba04ca4787b9985d1df2", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465668625), what: "dropDatabase.start", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.627-0400 m30998| 2015-07-09T14:14:28.625-0400 I COMMAND [conn298] DROP DATABASE: drop_database1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.627-0400 m30998| 2015-07-09T14:14:28.625-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.627-0400 m30998| 2015-07-09T14:14:28.625-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.625-0400-559eba040bd550bed3408b28", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465668625), what: "dropDatabase.start", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.632-0400 m30998| 2015-07-09T14:14:28.631-0400 I COMMAND [conn299] DROP DATABASE: drop_database3 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.632-0400 m30998| 2015-07-09T14:14:28.631-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.633-0400 m30998| 2015-07-09T14:14:28.631-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.631-0400-559eba040bd550bed3408b29", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465668631), what: "dropDatabase.start", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.656-0400 m30998| 2015-07-09T14:14:28.655-0400 I COMMAND [conn301] DROP DATABASE: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.656-0400 m30998| 2015-07-09T14:14:28.655-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.657-0400 m30998| 2015-07-09T14:14:28.655-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.655-0400-559eba040bd550bed3408b2a", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465668655), what: "dropDatabase.start", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.672-0400 m31100| 2015-07-09T14:14:28.669-0400 I COMMAND [conn54] command drop_database5.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.673-0400 m31100| 2015-07-09T14:14:28.670-0400 I COMMAND [conn51] command drop_database8.coll48 command: create { create: "coll48" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 110ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.673-0400 m30998| 2015-07-09T14:14:28.672-0400 I COMMAND [conn300] DROP DATABASE: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.673-0400 m30998| 2015-07-09T14:14:28.672-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.674-0400 m30998| 2015-07-09T14:14:28.673-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.672-0400-559eba040bd550bed3408b2b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465668673), what: "dropDatabase.start", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.675-0400 m30999| 2015-07-09T14:14:28.674-0400 I COMMAND [conn300] DROP DATABASE: drop_database8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.675-0400 m30999| 2015-07-09T14:14:28.674-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.675-0400 m30999| 2015-07-09T14:14:28.674-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.674-0400-559eba04ca4787b9985d1df3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465668674), what: "dropDatabase.start", ns: "drop_database8", details: {} } 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.683-0400 m30999| 2015-07-09T14:14:28.681-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database2 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.684-0400 m31100| 2015-07-09T14:14:28.681-0400 I COMMAND [conn156] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.684-0400 m31100| 2015-07-09T14:14:28.683-0400 I COMMAND [conn156] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.684-0400 m30999| 2015-07-09T14:14:28.683-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.683-0400-559eba04ca4787b9985d1df4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63637", time: new Date(1436465668683), what: "dropDatabase", ns: "drop_database2", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.691-0400 m30999| 2015-07-09T14:14:28.689-0400 I SHARDING [conn297] DBConfig::dropDatabase: drop_database6 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.692-0400 m31100| 2015-07-09T14:14:28.690-0400 I COMMAND [conn156] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.692-0400 m30998| 2015-07-09T14:14:28.691-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database3 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.693-0400 m30999| 2015-07-09T14:14:28.691-0400 I SHARDING [conn299] DBConfig::dropDatabase: drop_database4 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.701-0400 m30998| 2015-07-09T14:14:28.700-0400 I SHARDING [conn298] DBConfig::dropDatabase: drop_database1 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.709-0400 m30998| 2015-07-09T14:14:28.709-0400 I SHARDING [conn301] DBConfig::dropDatabase: drop_database7 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.710-0400 m30998| 2015-07-09T14:14:28.709-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database5 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.711-0400 m31100| 2015-07-09T14:14:28.711-0400 I COMMAND [conn156] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.712-0400 m31100| 2015-07-09T14:14:28.711-0400 I COMMAND [conn163] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.713-0400 m30999| 2015-07-09T14:14:28.711-0400 I SHARDING [conn297] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.711-0400-559eba04ca4787b9985d1df5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63630", time: new Date(1436465668711), what: "dropDatabase", ns: "drop_database6", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.719-0400 m30999| 2015-07-09T14:14:28.717-0400 I SHARDING [conn300] DBConfig::dropDatabase: drop_database8 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.719-0400 m31100| 2015-07-09T14:14:28.718-0400 I COMMAND [conn163] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.719-0400 m31100| 2015-07-09T14:14:28.719-0400 I COMMAND [conn156] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.723-0400 m30999| 2015-07-09T14:14:28.721-0400 I NETWORK 
[conn301] end connection 127.0.0.1:63637 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.725-0400 m31100| 2015-07-09T14:14:28.724-0400 I COMMAND [conn156] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.725-0400 m31100| 2015-07-09T14:14:28.724-0400 I COMMAND [conn158] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.727-0400 m30999| 2015-07-09T14:14:28.724-0400 I NETWORK [conn297] end connection 127.0.0.1:63630 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.728-0400 m30999| 2015-07-09T14:14:28.727-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.727-0400-559eba04ca4787b9985d1df6", server: "bs-osx108-8", clientAddr: "127.0.0.1:63636", time: new Date(1436465668727), what: "dropDatabase", ns: "drop_database8", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.743-0400 m31100| 2015-07-09T14:14:28.741-0400 I COMMAND [conn158] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.743-0400 m31100| 2015-07-09T14:14:28.742-0400 I COMMAND [conn161] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.745-0400 m30998| 2015-07-09T14:14:28.744-0400 I SHARDING [conn301] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.743-0400-559eba040bd550bed3408b2c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63638", time: new Date(1436465668743), what: "dropDatabase", ns: "drop_database7", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.748-0400 m30998| 2015-07-09T14:14:28.747-0400 I SHARDING [conn300] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.747-0400-559eba040bd550bed3408b2d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63635", time: new Date(1436465668747), what: "dropDatabase", ns: "drop_database5", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.754-0400 m31100| 2015-07-09T14:14:28.753-0400 I COMMAND [conn161] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.754-0400 m31100| 2015-07-09T14:14:28.754-0400 I COMMAND [conn157] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.755-0400 m30998| 2015-07-09T14:14:28.754-0400 I SHARDING [conn298] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.754-0400-559eba040bd550bed3408b2e", server: "bs-osx108-8", clientAddr: "127.0.0.1:63633", time: new Date(1436465668754), what: "dropDatabase", ns: "drop_database1", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.757-0400 m31100| 2015-07-09T14:14:28.757-0400 I COMMAND [conn157] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.758-0400 m30999| 2015-07-09T14:14:28.758-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.758-0400-559eba04ca4787b9985d1df7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63632", time: new Date(1436465668758), what: "dropDatabase", ns: "drop_database4", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.759-0400 m31100| 2015-07-09T14:14:28.758-0400 I COMMAND [conn159] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.760-0400 m31100| 2015-07-09T14:14:28.760-0400 I COMMAND [conn159] dropDatabase drop_database3 
finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.760-0400 m30998| 2015-07-09T14:14:28.760-0400 I SHARDING [conn299] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.760-0400-559eba040bd550bed3408b2f", server: "bs-osx108-8", clientAddr: "127.0.0.1:63634", time: new Date(1436465668760), what: "dropDatabase", ns: "drop_database3", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.763-0400 m31102| 2015-07-09T14:14:28.762-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.765-0400 m31102| 2015-07-09T14:14:28.764-0400 I COMMAND [repl writer worker 11] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.765-0400 m31102| 2015-07-09T14:14:28.765-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.767-0400 m31102| 2015-07-09T14:14:28.766-0400 I COMMAND [repl writer worker 1] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.767-0400 m31102| 2015-07-09T14:14:28.767-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.768-0400 m31102| 2015-07-09T14:14:28.767-0400 I COMMAND [repl writer worker 10] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.769-0400 m31102| 2015-07-09T14:14:28.768-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.770-0400 m31101| 2015-07-09T14:14:28.770-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database2 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.771-0400 m31102| 2015-07-09T14:14:28.770-0400 I COMMAND [repl writer worker 2] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.771-0400 m31101| 2015-07-09T14:14:28.771-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database2 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.771-0400 m31101| 2015-07-09T14:14:28.771-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database6 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.772-0400 m31102| 2015-07-09T14:14:28.771-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.772-0400 m31101| 2015-07-09T14:14:28.772-0400 I COMMAND [repl writer worker 3] dropDatabase drop_database6 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.772-0400 m31101| 2015-07-09T14:14:28.772-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database5 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.773-0400 m31102| 2015-07-09T14:14:28.772-0400 I COMMAND [repl writer worker 4] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.773-0400 m31101| 2015-07-09T14:14:28.772-0400 I COMMAND [repl writer worker 7] dropDatabase drop_database5 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.773-0400 m31102| 2015-07-09T14:14:28.773-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.774-0400 m31102| 2015-07-09T14:14:28.773-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database1 finished 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.774-0400 m31101| 2015-07-09T14:14:28.773-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database8 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.774-0400 m31102| 2015-07-09T14:14:28.774-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.775-0400 m31101| 2015-07-09T14:14:28.774-0400 I COMMAND [repl writer worker 13] dropDatabase drop_database8 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.775-0400 m31101| 2015-07-09T14:14:28.775-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database7 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.775-0400 m31102| 2015-07-09T14:14:28.775-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.776-0400 m31102| 2015-07-09T14:14:28.776-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.776-0400 m31101| 2015-07-09T14:14:28.776-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database7 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.777-0400 m31101| 2015-07-09T14:14:28.776-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database1 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.777-0400 m31102| 2015-07-09T14:14:28.776-0400 I COMMAND [repl writer worker 8] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.777-0400 m31101| 2015-07-09T14:14:28.777-0400 I COMMAND [repl writer worker 15] dropDatabase drop_database1 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.778-0400 m31101| 2015-07-09T14:14:28.778-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.779-0400 m31101| 2015-07-09T14:14:28.779-0400 I COMMAND [repl writer worker 9] dropDatabase drop_database4 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.779-0400 m31101| 2015-07-09T14:14:28.779-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database3 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.780-0400 m31101| 2015-07-09T14:14:28.780-0400 I COMMAND [repl writer worker 5] dropDatabase drop_database3 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.799-0400 m30999| 2015-07-09T14:14:28.789-0400 I NETWORK [conn300] end connection 127.0.0.1:63636 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.799-0400 m30998| 2015-07-09T14:14:28.790-0400 I NETWORK [conn298] end connection 127.0.0.1:63633 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.799-0400 m30998| 2015-07-09T14:14:28.798-0400 I NETWORK [conn300] end connection 127.0.0.1:63635 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.806-0400 m30998| 2015-07-09T14:14:28.799-0400 I NETWORK [conn299] end connection 127.0.0.1:63634 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.806-0400 m30998| 2015-07-09T14:14:28.799-0400 I NETWORK [conn301] end connection 127.0.0.1:63638 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.810-0400 m30999| 2015-07-09T14:14:28.810-0400 I NETWORK [conn299] end connection 127.0.0.1:63632 (1 connection now open) 
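
The databases created by the workload's threads (drop_database0 through drop_database8) have now been dropped on the primary and replicated to every secondary, and the harness prints its completion banner below. For orientation, workloads under jstests/concurrency/fsm_workloads/ share a common $config shape; the following is a minimal sketch of that shape only, not the verbatim contents of drop_database.js — the thread count, state names, and state bodies are illustrative assumptions.

var $config = (function() {

    var states = {
        init: function init(db, collName) {
            // Each thread targets its own database, which is why the log
            // above interleaves drop_database0 .. drop_database8.
            this.uniqueDBName = 'drop_database' + this.tid;
        },
        createAndDrop: function createAndDrop(db, collName) {
            // Creating a collection implicitly creates the database; the
            // dropDatabase that follows is what produces the paired
            // 'dropDatabase ... starting' / '... finished' lines on the
            // primary and then on each secondary's repl writer worker.
            var myDB = db.getSiblingDB(this.uniqueDBName);
            assert.commandWorked(myDB.createCollection(collName));
            assert.commandWorked(myDB.dropDatabase());
        }
    };

    var transitions = {
        init: {createAndDrop: 1},
        createAndDrop: {createAndDrop: 1}
    };

    return {
        threadCount: 9,   // matches drop_database0-8 above (illustrative)
        iterations: 20,   // illustrative
        states: states,
        transitions: transitions
    };
})();
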
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.825-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.825-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.825-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.825-0400 jstests/concurrency/fsm_workloads/drop_database.js: Workload completed in 3204 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.825-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.826-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.826-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.826-0400 m30999| 2015-07-09T14:14:28.825-0400 I COMMAND [conn1] DROP: db48.coll48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.826-0400 m30999| 2015-07-09T14:14:28.826-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.826-0400-559eba04ca4787b9985d1df8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465668826), what: "dropCollection.start", ns: "db48.coll48", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.884-0400 m30999| 2015-07-09T14:14:28.883-0400 I SHARDING [conn1] distributed lock 'db48.coll48/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba04ca4787b9985d1df9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.884-0400 m31100| 2015-07-09T14:14:28.884-0400 I COMMAND [conn34] CMD: drop db48.coll48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.887-0400 m31200| 2015-07-09T14:14:28.886-0400 I COMMAND [conn84] CMD: drop db48.coll48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.889-0400 m31101| 2015-07-09T14:14:28.889-0400 I COMMAND [repl writer worker 2] CMD: drop db48.coll48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.889-0400 m31102| 2015-07-09T14:14:28.889-0400 I COMMAND [repl writer worker 15] CMD: drop db48.coll48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.890-0400 m31201| 2015-07-09T14:14:28.890-0400 I COMMAND [repl writer worker 7] CMD: drop db48.coll48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.891-0400 m31202| 2015-07-09T14:14:28.890-0400 I COMMAND [repl writer worker 10] CMD: drop db48.coll48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.943-0400 m31100| 2015-07-09T14:14:28.942-0400 I SHARDING [conn34] remotely refreshing metadata for db48.coll48 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba00ca4787b9985d1d6f, current metadata version is 2|3||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.945-0400 m31100| 2015-07-09T14:14:28.944-0400 W SHARDING [conn34] no chunks found when reloading db48.coll48, previous version was 0|0||559eba00ca4787b9985d1d6f, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.945-0400 m31100| 2015-07-09T14:14:28.944-0400 I SHARDING [conn34] dropping metadata for db48.coll48 at shard version 2|3||559eba00ca4787b9985d1d6f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.946-0400 m31200| 2015-07-09T14:14:28.946-0400 I SHARDING [conn84] remotely refreshing metadata for db48.coll48 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba00ca4787b9985d1d6f, current metadata version is 2|5||559eba00ca4787b9985d1d6f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.948-0400 m31200| 2015-07-09T14:14:28.947-0400 W SHARDING [conn84] no chunks found when reloading db48.coll48, previous version was 0|0||559eba00ca4787b9985d1d6f, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.948-0400 m31200| 2015-07-09T14:14:28.948-0400 I SHARDING [conn84] dropping metadata for db48.coll48 at shard version 2|5||559eba00ca4787b9985d1d6f, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:28.949-0400 m30999| 2015-07-09T14:14:28.949-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:28.949-0400-559eba04ca4787b9985d1dfa", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465668949), what: "dropCollection", ns: "db48.coll48", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.004-0400 m30999| 2015-07-09T14:14:29.003-0400 I SHARDING [conn1] distributed lock 'db48.coll48/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.059-0400 m30999| 2015-07-09T14:14:29.059-0400 I COMMAND [conn1] DROP DATABASE: db48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.060-0400 m30999| 2015-07-09T14:14:29.059-0400 I SHARDING [conn1] DBConfig::dropDatabase: db48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.060-0400 m30999| 2015-07-09T14:14:29.059-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.059-0400-559eba05ca4787b9985d1dfb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465669059), what: "dropDatabase.start", ns: "db48", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.165-0400 m30999| 2015-07-09T14:14:29.165-0400 I SHARDING [conn1] DBConfig::dropDatabase: db48 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.166-0400 m31100| 2015-07-09T14:14:29.165-0400 I COMMAND [conn157] dropDatabase db48 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.166-0400 m31100| 2015-07-09T14:14:29.166-0400 I COMMAND [conn157] dropDatabase db48 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.167-0400 m30999| 2015-07-09T14:14:29.166-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.166-0400-559eba05ca4787b9985d1dfc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465669166), what: "dropDatabase", ns: "db48", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.167-0400 m31102| 2015-07-09T14:14:29.167-0400 I COMMAND [repl writer worker 7] dropDatabase db48 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.167-0400 m31102| 2015-07-09T14:14:29.167-0400 I COMMAND [repl writer worker 7] dropDatabase db48 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.167-0400 m31101| 2015-07-09T14:14:29.167-0400 I COMMAND [repl writer worker 11] dropDatabase db48 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.168-0400 m31101| 2015-07-09T14:14:29.167-0400 I COMMAND [repl writer worker 11] dropDatabase db48 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.261-0400 m31100| 2015-07-09T14:14:29.261-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.266-0400 m31101| 2015-07-09T14:14:29.265-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.266-0400 m31102| 2015-07-09T14:14:29.265-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.303-0400 m31200| 2015-07-09T14:14:29.303-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400 jstests/concurrency/fsm_workloads/list_indexes.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400 m31202| 2015-07-09T14:14:29.307-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.307-0400 m31201| 2015-07-09T14:14:29.307-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.315-0400 m30999| 2015-07-09T14:14:29.315-0400 I SHARDING [conn1] distributed lock 'db49/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba05ca4787b9985d1dfd
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.319-0400 m30999| 2015-07-09T14:14:29.319-0400 I SHARDING [conn1] Placing [db49] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.319-0400 m30999| 2015-07-09T14:14:29.319-0400 I SHARDING [conn1] Enabling sharding for database [db49] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.373-0400 m30999| 2015-07-09T14:14:29.372-0400 I SHARDING [conn1] distributed lock 'db49/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.394-0400 m31100| 2015-07-09T14:14:29.394-0400 I INDEX [conn70] build index on: db49.coll49 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.394-0400 m31100| 2015-07-09T14:14:29.394-0400 I INDEX [conn70] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.399-0400 m31100| 2015-07-09T14:14:29.399-0400 I INDEX [conn70] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.401-0400 m30999| 2015-07-09T14:14:29.400-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db49.coll49", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.404-0400 m30999| 2015-07-09T14:14:29.403-0400 I SHARDING [conn1] distributed lock 'db49.coll49/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba05ca4787b9985d1dfe
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.405-0400 m30999| 2015-07-09T14:14:29.404-0400 I SHARDING [conn1] enable sharding on: db49.coll49 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.405-0400 m30999| 2015-07-09T14:14:29.404-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.404-0400-559eba05ca4787b9985d1dff", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465669404), what: "shardCollection.start", ns: "db49.coll49", details: { shardKey: { _id: "hashed" }, collection: "db49.coll49", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.408-0400 m31101| 2015-07-09T14:14:29.408-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.409-0400 m31101| 2015-07-09T14:14:29.408-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.414-0400 m31102| 2015-07-09T14:14:29.413-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.414-0400 m31102| 2015-07-09T14:14:29.413-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.417-0400 m31101| 2015-07-09T14:14:29.416-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.421-0400 m31102| 2015-07-09T14:14:29.421-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.458-0400 m30999| 2015-07-09T14:14:29.457-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db49.coll49 using new epoch 559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.566-0400 m30999| 2015-07-09T14:14:29.565-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db49.coll49: 0ms sequenceNumber: 215 version: 1|1||559eba05ca4787b9985d1e00 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.621-0400 m30999| 2015-07-09T14:14:29.621-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db49.coll49: 0ms sequenceNumber: 216 version: 1|1||559eba05ca4787b9985d1e00 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.623-0400 m31100| 2015-07-09T14:14:29.623-0400 I SHARDING [conn51] remotely refreshing metadata for db49.coll49 with requested shard version 1|1||559eba05ca4787b9985d1e00, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.625-0400 m31100| 2015-07-09T14:14:29.624-0400 I SHARDING [conn51] collection db49.coll49 was previously unsharded, new metadata loaded with shard version 1|1||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.625-0400 m31100| 2015-07-09T14:14:29.624-0400 I SHARDING [conn51] collection version was loaded at version 1|1||559eba05ca4787b9985d1e00, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.625-0400 m30999| 2015-07-09T14:14:29.625-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.625-0400-559eba05ca4787b9985d1e01", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465669625), what: "shardCollection", ns: "db49.coll49", details: { version: "1|1||559eba05ca4787b9985d1e00" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.680-0400 m30999| 2015-07-09T14:14:29.680-0400 I SHARDING [conn1] distributed lock 'db49.coll49/bs-osx108-8:30999:1436464534:16807' unlocked.
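
Above, the harness prepares list_indexes.js's collection: it places db49 on test-rs0, enables sharding, builds the hashed _id index on the primary (replicated by the workers on 31101/31102), and shards db49.coll49 into two initial chunks. Run by hand against the mongos, the same setup would look roughly like the sketch below; the localhost address is an assumption, and the harness actually drives this through its own library code rather than these exact calls.

var adminDB = new Mongo('localhost:30999').getDB('admin');  // assumed address

// 'Enabling sharding for database [db49] in config db'
assert.commandWorked(adminDB.runCommand({enablesharding: 'db49'}));

// 'CMD: shardcollection: { shardcollection: "db49.coll49", key: { _id: "hashed" } }'
// On an empty collection a hashed shard key also pre-creates the initial
// chunks, hence 'going to create 2 chunk(s) for: db49.coll49' above.
assert.commandWorked(adminDB.runCommand({
    shardcollection: 'db49.coll49',
    key: {_id: 'hashed'}
}));
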
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.681-0400 m30999| 2015-07-09T14:14:29.681-0400 I SHARDING [conn1] moving chunk ns: db49.coll49 moving ( ns: db49.coll49, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.681-0400 m31100| 2015-07-09T14:14:29.681-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.683-0400 m31100| 2015-07-09T14:14:29.682-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba05ca4787b9985d1e00') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.686-0400 m31100| 2015-07-09T14:14:29.686-0400 I SHARDING [conn34] distributed lock 'db49.coll49/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba05792e00bb67274a04 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.687-0400 m31100| 2015-07-09T14:14:29.686-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.686-0400-559eba05792e00bb67274a05", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465669686), what: "moveChunk.start", ns: "db49.coll49", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.740-0400 m31100| 2015-07-09T14:14:29.739-0400 I SHARDING [conn34] remotely refreshing metadata for db49.coll49 based on current shard version 1|1||559eba05ca4787b9985d1e00, current metadata version is 1|1||559eba05ca4787b9985d1e00 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.741-0400 m31100| 2015-07-09T14:14:29.741-0400 I SHARDING [conn34] metadata of collection db49.coll49 already up to date (shard version : 1|1||559eba05ca4787b9985d1e00, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.741-0400 m31100| 2015-07-09T14:14:29.741-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559eba05ca4787b9985d1e00 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.742-0400 m31100| 2015-07-09T14:14:29.741-0400 I SHARDING [conn34] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.742-0400 m31200| 2015-07-09T14:14:29.742-0400 I SHARDING [conn16] remotely refreshing metadata for db49.coll49, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.744-0400 m31200| 2015-07-09T14:14:29.743-0400 I SHARDING [conn16] collection db49.coll49 was previously unsharded, new metadata loaded with shard version 0|0||559eba05ca4787b9985d1e00 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.744-0400 m31200| 2015-07-09T14:14:29.743-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba05ca4787b9985d1e00, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.744-0400 m31200| 2015-07-09T14:14:29.744-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db49.coll49 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba05ca4787b9985d1e00 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.747-0400 m31100| 2015-07-09T14:14:29.746-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.750-0400 m31100| 2015-07-09T14:14:29.749-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.755-0400 m31100| 2015-07-09T14:14:29.754-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.764-0400 m31100| 2015-07-09T14:14:29.763-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.765-0400 m31200| 2015-07-09T14:14:29.765-0400 I INDEX [migrateThread] build index on: db49.coll49 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.765-0400 m31200| 2015-07-09T14:14:29.765-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.783-0400 m31100| 2015-07-09T14:14:29.782-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.784-0400 m31200| 2015-07-09T14:14:29.783-0400 I INDEX [migrateThread] build index on: db49.coll49 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.784-0400 m31200| 2015-07-09T14:14:29.783-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.794-0400 m31200| 2015-07-09T14:14:29.793-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.795-0400 m31200| 2015-07-09T14:14:29.794-0400 I SHARDING [migrateThread] Deleter starting delete for: db49.coll49 from { _id: 0 } -> { _id: MaxKey }, with opId: 85125 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.795-0400 m31200| 2015-07-09T14:14:29.795-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db49.coll49 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.804-0400 m31201| 2015-07-09T14:14:29.804-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.804-0400 m31201| 2015-07-09T14:14:29.804-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.804-0400 m31202| 2015-07-09T14:14:29.804-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.805-0400 m31202| 2015-07-09T14:14:29.804-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.808-0400 m31201| 2015-07-09T14:14:29.808-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.809-0400 m31202| 2015-07-09T14:14:29.808-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.810-0400 m31200| 2015-07-09T14:14:29.810-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.811-0400 m31200| 2015-07-09T14:14:29.810-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db49.coll49' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.816-0400 m31100| 2015-07-09T14:14:29.816-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.816-0400 m31100| 2015-07-09T14:14:29.816-0400 I SHARDING [conn34] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.817-0400 m31100| 2015-07-09T14:14:29.817-0400 I SHARDING [conn34] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.817-0400 m31100| 2015-07-09T14:14:29.817-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559eba05ca4787b9985d1e00 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.823-0400 m31200| 2015-07-09T14:14:29.822-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db49.coll49' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.823-0400 m31200| 2015-07-09T14:14:29.823-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.823-0400-559eba05d5a107a5b9c0db49", server: "bs-osx108-8", clientAddr: "", time: 
new Date(1436465669823), what: "moveChunk.to", ns: "db49.coll49", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 50, step 2 of 5: 14, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.876-0400 m31100| 2015-07-09T14:14:29.876-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.876-0400 m31100| 2015-07-09T14:14:29.876-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559eba05ca4787b9985d1e00 through { _id: MinKey } -> { _id: 0 } for collection 'db49.coll49' [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.878-0400 m31100| 2015-07-09T14:14:29.877-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.877-0400-559eba05792e00bb67274a06", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465669877), what: "moveChunk.commit", ns: "db49.coll49", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.930-0400 m31100| 2015-07-09T14:14:29.930-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.931-0400 m31100| 2015-07-09T14:14:29.930-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.931-0400 m31100| 2015-07-09T14:14:29.930-0400 I SHARDING [conn34] Deleter starting delete for: db49.coll49 from { _id: 0 } -> { _id: MaxKey }, with opId: 128544 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.931-0400 m31100| 2015-07-09T14:14:29.930-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db49.coll49 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.931-0400 m31100| 2015-07-09T14:14:29.930-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.933-0400 m31100| 2015-07-09T14:14:29.933-0400 I SHARDING [conn34] distributed lock 'db49.coll49/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.934-0400 m31100| 2015-07-09T14:14:29.933-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.933-0400-559eba05792e00bb67274a07", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465669933), what: "moveChunk.from", ns: "db49.coll49", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.988-0400 m31100| 2015-07-09T14:14:29.987-0400 I COMMAND [conn34] command db49.coll49 command: moveChunk { moveChunk: "db49.coll49", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba05ca4787b9985d1e00') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 305ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.990-0400 m30999| 2015-07-09T14:14:29.989-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db49.coll49: 0ms sequenceNumber: 217 version: 2|1||559eba05ca4787b9985d1e00 based on: 1|1||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.991-0400 m31100| 2015-07-09T14:14:29.991-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db49.coll49", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba05ca4787b9985d1e00') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.994-0400 m31100| 2015-07-09T14:14:29.994-0400 I SHARDING [conn34] distributed lock 'db49.coll49/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba05792e00bb67274a08
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.994-0400 m31100| 2015-07-09T14:14:29.994-0400 I SHARDING [conn34] remotely refreshing metadata for db49.coll49 based on current shard version 2|0||559eba05ca4787b9985d1e00, current metadata version is 2|0||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.996-0400 m31100| 2015-07-09T14:14:29.995-0400 I SHARDING [conn34] updating metadata for db49.coll49 from shard version 2|0||559eba05ca4787b9985d1e00 to shard version 2|1||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.996-0400 m31100| 2015-07-09T14:14:29.995-0400 I SHARDING [conn34] collection version was loaded at version 2|1||559eba05ca4787b9985d1e00, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.996-0400 m31100| 2015-07-09T14:14:29.995-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:29.997-0400 m31100| 2015-07-09T14:14:29.996-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:29.996-0400-559eba05792e00bb67274a09", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465669996), what: "split", ns: "db49.coll49", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba05ca4787b9985d1e00') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba05ca4787b9985d1e00') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.051-0400 m31100| 2015-07-09T14:14:30.051-0400 I SHARDING [conn34] distributed lock 'db49.coll49/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.053-0400 m30999| 2015-07-09T14:14:30.053-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db49.coll49: 0ms sequenceNumber: 218 version: 2|3||559eba05ca4787b9985d1e00 based on: 2|1||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.053-0400 m31200| 2015-07-09T14:14:30.053-0400 I SHARDING [conn84] received splitChunk request: { splitChunk: "db49.coll49", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba05ca4787b9985d1e00') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.057-0400 m31200| 2015-07-09T14:14:30.057-0400 I SHARDING [conn84] distributed lock 'db49.coll49/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba06d5a107a5b9c0db4a
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.057-0400 m31200| 2015-07-09T14:14:30.057-0400 I SHARDING [conn84] remotely refreshing metadata for db49.coll49 based on current shard version 0|0||559eba05ca4787b9985d1e00, current metadata version is 1|1||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.058-0400 m31200| 2015-07-09T14:14:30.058-0400 I SHARDING [conn84] updating metadata for db49.coll49 from shard version 0|0||559eba05ca4787b9985d1e00 to shard version 2|0||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.058-0400 m31200| 2015-07-09T14:14:30.058-0400 I SHARDING [conn84] collection version was loaded at version 2|3||559eba05ca4787b9985d1e00, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.058-0400 m31200| 2015-07-09T14:14:30.058-0400 I SHARDING [conn84] splitChunk accepted at version 2|0||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.059-0400 m31200| 2015-07-09T14:14:30.059-0400 I SHARDING [conn84] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:30.059-0400-559eba06d5a107a5b9c0db4b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63007", time: new Date(1436465670059), what: "split", ns: "db49.coll49", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba05ca4787b9985d1e00') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba05ca4787b9985d1e00') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.114-0400 m31200| 2015-07-09T14:14:30.114-0400 I SHARDING [conn84] distributed lock 'db49.coll49/bs-osx108-8:31200:1436464537:809424560' unlocked.
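
To spread the two initial chunks across both shards, the router has now migrated the { _id: 0 } -> { _id: MaxKey } chunk from test-rs0 to test-rs1 (the moveChunk.start / moveChunk.commit / moveChunk.from events above, with waitForDelete: true accounting for the inline range deletion on the donor), and then split the remaining chunk on each shard at the logged splitKeys. A hand-run equivalent would look roughly like the sketch below; the harness issues these internally, the address is again an assumption, and for a hashed shard key the chunk is addressed by bounds rather than a document value.

var adminDB = new Mongo('localhost:30999').getDB('admin');  // assumed address

// Equivalent of the migration logged above, addressed by the chunk's
// bounds in hashed-key space.
assert.commandWorked(adminDB.runCommand({
    moveChunk: 'db49.coll49',
    bounds: [{_id: NumberLong(0)}, {_id: MaxKey}],
    to: 'test-rs1'
}));

// Equivalent of the two splitChunk requests, one per shard, at the
// splitKeys recorded in the log (+/-4611686018427387902).
assert.commandWorked(adminDB.runCommand({
    split: 'db49.coll49',
    middle: {_id: NumberLong('-4611686018427387902')}
}));
assert.commandWorked(adminDB.runCommand({
    split: 'db49.coll49',
    middle: {_id: NumberLong('4611686018427387902')}
}));
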
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.117-0400 m30999| 2015-07-09T14:14:30.116-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db49.coll49: 0ms sequenceNumber: 219 version: 2|5||559eba05ca4787b9985d1e00 based on: 2|3||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.126-0400 m31100| 2015-07-09T14:14:30.125-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.126-0400 m31100| 2015-07-09T14:14:30.125-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.127-0400 m31200| 2015-07-09T14:14:30.127-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.127-0400 m31200| 2015-07-09T14:14:30.127-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.131-0400 m31100| 2015-07-09T14:14:30.131-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.132-0400 m31200| 2015-07-09T14:14:30.132-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.145-0400 m31200| 2015-07-09T14:14:30.144-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.145-0400 m31200| 2015-07-09T14:14:30.144-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.151-0400 m31102| 2015-07-09T14:14:30.150-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.151-0400 m31100| 2015-07-09T14:14:30.150-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.152-0400 m31201| 2015-07-09T14:14:30.150-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.152-0400 m31102| 2015-07-09T14:14:30.150-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.152-0400 m31202| 2015-07-09T14:14:30.150-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.152-0400 m31100| 2015-07-09T14:14:30.150-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.152-0400 m31201| 2015-07-09T14:14:30.150-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.153-0400 m31202| 2015-07-09T14:14:30.150-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.153-0400 m31200| 2015-07-09T14:14:30.153-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.156-0400 m31100| 2015-07-09T14:14:30.155-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.158-0400 m31201| 2015-07-09T14:14:30.156-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.159-0400 m31202| 2015-07-09T14:14:30.158-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.160-0400 m31101| 2015-07-09T14:14:30.155-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.160-0400 m31101| 2015-07-09T14:14:30.156-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.161-0400 m31102| 2015-07-09T14:14:30.160-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.168-0400 m31101| 2015-07-09T14:14:30.167-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.169-0400 m31100| 2015-07-09T14:14:30.167-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.169-0400 m31100| 2015-07-09T14:14:30.168-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.170-0400 m31200| 2015-07-09T14:14:30.170-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.170-0400 m31200| 2015-07-09T14:14:30.170-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.174-0400 m31102| 2015-07-09T14:14:30.174-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.174-0400 m31102| 2015-07-09T14:14:30.174-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.177-0400 m31202| 2015-07-09T14:14:30.177-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.178-0400 m31202| 2015-07-09T14:14:30.177-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.179-0400 m31100| 2015-07-09T14:14:30.178-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.183-0400 m31201| 2015-07-09T14:14:30.182-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.183-0400 m31201| 2015-07-09T14:14:30.182-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.184-0400 m31200| 2015-07-09T14:14:30.181-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.184-0400 m31101| 2015-07-09T14:14:30.183-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.184-0400 m31101| 2015-07-09T14:14:30.183-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.193-0400 m31201| 2015-07-09T14:14:30.193-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.194-0400 m31202| 2015-07-09T14:14:30.193-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.194-0400 m31101| 2015-07-09T14:14:30.194-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.200-0400 m31200| 2015-07-09T14:14:30.199-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.200-0400 m31200| 2015-07-09T14:14:30.199-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.200-0400 m31102| 2015-07-09T14:14:30.199-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.201-0400 m31100| 2015-07-09T14:14:30.201-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.202-0400 m31100| 2015-07-09T14:14:30.201-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.202-0400 m31101| 2015-07-09T14:14:30.202-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.203-0400 m31101| 2015-07-09T14:14:30.202-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.211-0400 m31200| 2015-07-09T14:14:30.211-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.213-0400 m31102| 2015-07-09T14:14:30.212-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.213-0400 m31102| 2015-07-09T14:14:30.212-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.214-0400 m31201| 2015-07-09T14:14:30.211-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.214-0400 m31201| 2015-07-09T14:14:30.211-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.214-0400 m31202| 2015-07-09T14:14:30.212-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.214-0400 m31202| 2015-07-09T14:14:30.212-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.215-0400 m31100| 2015-07-09T14:14:30.214-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.217-0400 m31202| 2015-07-09T14:14:30.216-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.217-0400 m31102| 2015-07-09T14:14:30.216-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.218-0400 m31101| 2015-07-09T14:14:30.217-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.222-0400 m31201| 2015-07-09T14:14:30.221-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.230-0400 m31101| 2015-07-09T14:14:30.229-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.231-0400 m31101| 2015-07-09T14:14:30.229-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.231-0400 m31100| 2015-07-09T14:14:30.229-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.232-0400 m31202| 2015-07-09T14:14:30.230-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.232-0400 m31100| 2015-07-09T14:14:30.229-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.232-0400 m31102| 2015-07-09T14:14:30.230-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.233-0400 m31200| 2015-07-09T14:14:30.230-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.233-0400 m31202| 2015-07-09T14:14:30.230-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.233-0400 m31200| 2015-07-09T14:14:30.230-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.234-0400 m31102| 2015-07-09T14:14:30.230-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.240-0400 m31201| 2015-07-09T14:14:30.240-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.240-0400 m31201| 2015-07-09T14:14:30.240-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.241-0400 m31100| 2015-07-09T14:14:30.240-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.242-0400 m31200| 2015-07-09T14:14:30.242-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.246-0400 m31102| 2015-07-09T14:14:30.246-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.248-0400 m31201| 2015-07-09T14:14:30.248-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.249-0400 m31202| 2015-07-09T14:14:30.248-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.249-0400 m31101| 2015-07-09T14:14:30.248-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.254-0400 m31200| 2015-07-09T14:14:30.253-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.254-0400 m31200| 2015-07-09T14:14:30.253-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.261-0400 m31100| 2015-07-09T14:14:30.261-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.261-0400 m31102| 2015-07-09T14:14:30.261-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.262-0400 m31102| 2015-07-09T14:14:30.261-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.262-0400 m31100| 2015-07-09T14:14:30.261-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.266-0400 m31101| 2015-07-09T14:14:30.266-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.266-0400 m31101| 2015-07-09T14:14:30.266-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.272-0400 m31201| 2015-07-09T14:14:30.272-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.272-0400 m31201| 2015-07-09T14:14:30.272-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.272-0400 m31202| 2015-07-09T14:14:30.272-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.272-0400 m31202| 2015-07-09T14:14:30.272-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.278-0400 m31200| 2015-07-09T14:14:30.277-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.279-0400 m31102| 2015-07-09T14:14:30.279-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.287-0400 m31101| 2015-07-09T14:14:30.286-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.287-0400 m31100| 2015-07-09T14:14:30.286-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.288-0400 m31202| 2015-07-09T14:14:30.288-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.289-0400 m31201| 2015-07-09T14:14:30.288-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.292-0400 m31202| 2015-07-09T14:14:30.291-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.292-0400 m31202| 2015-07-09T14:14:30.291-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.297-0400 m31200| 2015-07-09T14:14:30.296-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.297-0400 m31200| 2015-07-09T14:14:30.296-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.297-0400 m31201| 2015-07-09T14:14:30.296-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.298-0400 m31201| 2015-07-09T14:14:30.296-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.302-0400 m31101| 2015-07-09T14:14:30.302-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.304-0400 m31101| 2015-07-09T14:14:30.302-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.304-0400 m31100| 2015-07-09T14:14:30.302-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.305-0400 m31100| 2015-07-09T14:14:30.302-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.305-0400 m31102| 2015-07-09T14:14:30.302-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.305-0400 m31102| 2015-07-09T14:14:30.303-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.306-0400 m31202| 2015-07-09T14:14:30.306-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.309-0400 m31200| 2015-07-09T14:14:30.309-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.310-0400 m31201| 2015-07-09T14:14:30.309-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.316-0400 m31100| 2015-07-09T14:14:30.314-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.317-0400 m31101| 2015-07-09T14:14:30.317-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.322-0400 m31102| 2015-07-09T14:14:30.321-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.330-0400 m31202| 2015-07-09T14:14:30.329-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.330-0400 m31202| 2015-07-09T14:14:30.330-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.333-0400 m31201| 2015-07-09T14:14:30.332-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.333-0400 m31201| 2015-07-09T14:14:30.332-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.340-0400 m31200| 2015-07-09T14:14:30.339-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.340-0400 m31200| 2015-07-09T14:14:30.339-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.340-0400 m31100| 2015-07-09T14:14:30.339-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.341-0400 m31101| 2015-07-09T14:14:30.339-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.341-0400 m31100| 2015-07-09T14:14:30.339-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.341-0400 m31101| 2015-07-09T14:14:30.340-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.345-0400 m31202| 2015-07-09T14:14:30.344-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.346-0400 m31201| 2015-07-09T14:14:30.345-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.352-0400 m31102| 2015-07-09T14:14:30.351-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.352-0400 m31102| 2015-07-09T14:14:30.351-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.354-0400 m31100| 2015-07-09T14:14:30.353-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.355-0400 m31200| 2015-07-09T14:14:30.355-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.358-0400 m31101| 2015-07-09T14:14:30.357-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.359-0400 m31102| 2015-07-09T14:14:30.357-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.364-0400 m31100| 2015-07-09T14:14:30.363-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.364-0400 m31100| 2015-07-09T14:14:30.363-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.371-0400 m31101| 2015-07-09T14:14:30.370-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.371-0400 m31101| 2015-07-09T14:14:30.371-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.382-0400 m31201| 2015-07-09T14:14:30.381-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.383-0400 m31201| 2015-07-09T14:14:30.381-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.383-0400 m31202| 2015-07-09T14:14:30.382-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.383-0400 m31202| 2015-07-09T14:14:30.382-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.389-0400 m31200| 2015-07-09T14:14:30.389-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.390-0400 m31200| 2015-07-09T14:14:30.389-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.390-0400 m31102| 2015-07-09T14:14:30.389-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.390-0400 m31102| 2015-07-09T14:14:30.389-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.394-0400 m31100| 2015-07-09T14:14:30.393-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.396-0400 m31101| 2015-07-09T14:14:30.395-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.398-0400 m31201| 2015-07-09T14:14:30.397-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.409-0400 m31200| 2015-07-09T14:14:30.408-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.409-0400 m31202| 2015-07-09T14:14:30.408-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.410-0400 m31102| 2015-07-09T14:14:30.409-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.412-0400 m31101| 2015-07-09T14:14:30.412-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.412-0400 m31101| 2015-07-09T14:14:30.412-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.423-0400 m31202| 2015-07-09T14:14:30.423-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.423-0400 m31202| 2015-07-09T14:14:30.423-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.425-0400 m31100| 2015-07-09T14:14:30.425-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.426-0400 m31100| 2015-07-09T14:14:30.425-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.431-0400 m31102| 2015-07-09T14:14:30.430-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.431-0400 m31102| 2015-07-09T14:14:30.430-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.431-0400 m31201| 2015-07-09T14:14:30.429-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.431-0400 m31201| 2015-07-09T14:14:30.429-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.438-0400 m31101| 2015-07-09T14:14:30.438-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.439-0400 m31200| 2015-07-09T14:14:30.438-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.439-0400 m31202| 2015-07-09T14:14:30.438-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.439-0400 m31200| 2015-07-09T14:14:30.438-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.442-0400 m31100| 2015-07-09T14:14:30.441-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.446-0400 m31201| 2015-07-09T14:14:30.445-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.447-0400 m31102| 2015-07-09T14:14:30.447-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.450-0400 m31200| 2015-07-09T14:14:30.450-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
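Each foo<N>_1 build above and below is an ordinary createIndexes round-trip: the two shard primaries (m31100, m31200) build first, and the repl writer workers on the secondaries (m31101/m31102, m31201/m31202) replay the build from the oplog. A minimal shell sketch of one such build, assuming a connection through a mongos; since the chunks hold no documents yet, each build is a 0-record scan:

    // Build one of the foo0_1..foo9_1 indexes seen in the log.
    db.getSiblingDB("db49").coll49.createIndex({ foo0: 1 });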
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.451-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.467-0400 m31101| 2015-07-09T14:14:30.453-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.470-0400 m31101| 2015-07-09T14:14:30.454-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.497-0400 m31102| 2015-07-09T14:14:30.470-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.498-0400 m31102| 2015-07-09T14:14:30.470-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.498-0400 m31201| 2015-07-09T14:14:30.493-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.498-0400 m31201| 2015-07-09T14:14:30.494-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.528-0400 m31202| 2015-07-09T14:14:30.504-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.528-0400 m31202| 2015-07-09T14:14:30.505-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.529-0400 m31101| 2015-07-09T14:14:30.517-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.529-0400 m31102| 2015-07-09T14:14:30.526-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.541-0400 m31201| 2015-07-09T14:14:30.540-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.550-0400 m31202| 2015-07-09T14:14:30.550-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.563-0400 m30999| 2015-07-09T14:14:30.562-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63674 #302 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.574-0400 m30999| 2015-07-09T14:14:30.573-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63675 #303 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.579-0400 m30999| 2015-07-09T14:14:30.579-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63676 #304 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.583-0400 m30998| 2015-07-09T14:14:30.582-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63677 #302 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.587-0400 m30999| 2015-07-09T14:14:30.586-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63678 #305 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.588-0400 m30998| 2015-07-09T14:14:30.588-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63679 #303 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.591-0400 m30999| 2015-07-09T14:14:30.589-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63680 #306 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.591-0400 m30998| 2015-07-09T14:14:30.589-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63681 #304 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.592-0400 m30998| 2015-07-09T14:14:30.591-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63682 #305 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.592-0400 m30998| 2015-07-09T14:14:30.591-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63683 #306 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.599-0400 setting random seed: 749363410286
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.599-0400 setting random seed: 8842739532701
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.599-0400 setting random seed: 4445040351711
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.600-0400 setting random seed: 4939973587170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.601-0400 setting random seed: 6010767612606
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.601-0400 m31100| 2015-07-09T14:14:30.600-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.601-0400 m31200| 2015-07-09T14:14:30.600-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.601-0400 setting random seed: 2505202591419
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.601-0400 setting random seed: 9923395318910
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.602-0400 setting random seed: 2971312114968
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.602-0400 setting random seed: 4550842549651
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.602-0400 m31100| 2015-07-09T14:14:30.601-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.603-0400 setting random seed: 7793225995264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.603-0400 m31200| 2015-07-09T14:14:30.602-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49
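The connection burst on m30999/m30998 followed by the ten "setting random seed" lines is the FSM harness spinning up its 10 worker threads: each worker opens its own router connection and seeds the shell PRNG. A rough sketch of that per-worker startup, with the hostname assumed from the log:

    var conn = new Mongo("bs-osx108-8:30999"); // one connection per worker
    Random.setRandomSeed();                    // prints "setting random seed: <seed>"
    var workerDB = conn.getDB("db49");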
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.605-0400 m31102| 2015-07-09T14:14:30.603-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.605-0400 m31101| 2015-07-09T14:14:30.604-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.606-0400 m30998| 2015-07-09T14:14:30.604-0400 I SHARDING [conn302] ChunkManager: time to load chunks for db49.coll49: 1ms sequenceNumber: 60 version: 2|5||559eba05ca4787b9985d1e00 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.606-0400 m31200| 2015-07-09T14:14:30.604-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.606-0400 m31100| 2015-07-09T14:14:30.605-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.607-0400 m31200| 2015-07-09T14:14:30.605-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.607-0400 m31201| 2015-07-09T14:14:30.606-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.607-0400 m31100| 2015-07-09T14:14:30.607-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.607-0400 m31200| 2015-07-09T14:14:30.607-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.608-0400 m31202| 2015-07-09T14:14:30.607-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.608-0400 m31102| 2015-07-09T14:14:30.607-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.609-0400 m31201| 2015-07-09T14:14:30.608-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.609-0400 m31202| 2015-07-09T14:14:30.609-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.610-0400 m31101| 2015-07-09T14:14:30.609-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.610-0400 m31200| 2015-07-09T14:14:30.609-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.610-0400 m31100| 2015-07-09T14:14:30.609-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.610-0400 m31201| 2015-07-09T14:14:30.610-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.611-0400 m31102| 2015-07-09T14:14:30.610-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.611-0400 m31100| 2015-07-09T14:14:30.611-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.612-0400 m31202| 2015-07-09T14:14:30.611-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.612-0400 m31201| 2015-07-09T14:14:30.611-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
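The m30998 ChunkManager line above reads "based on: (empty)" because this second router had never loaded db49.coll49 metadata; the first operation routed through it triggers a full chunk load (sequenceNumber: 60). The same reload can be requested explicitly; a sketch, assuming direct access to the second mongos:

    var mongos2 = new Mongo("bs-osx108-8:30998");
    // Drop cached routing metadata so the next operation reloads it.
    mongos2.getDB("admin").runCommand({ flushRouterConfig: 1 });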
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.612-0400 m31200| 2015-07-09T14:14:30.612-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.613-0400 m31200| 2015-07-09T14:14:30.612-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.613-0400 m31101| 2015-07-09T14:14:30.612-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.613-0400 m31200| 2015-07-09T14:14:30.613-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.613-0400 m31202| 2015-07-09T14:14:30.613-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.614-0400 m31100| 2015-07-09T14:14:30.613-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.614-0400 m31102| 2015-07-09T14:14:30.614-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.614-0400 m31201| 2015-07-09T14:14:30.614-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.615-0400 m31101| 2015-07-09T14:14:30.614-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.615-0400 m31202| 2015-07-09T14:14:30.615-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.615-0400 m31100| 2015-07-09T14:14:30.615-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.616-0400 m31100| 2015-07-09T14:14:30.615-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.616-0400 m31200| 2015-07-09T14:14:30.615-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.617-0400 m31102| 2015-07-09T14:14:30.617-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.617-0400 m31202| 2015-07-09T14:14:30.617-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.618-0400 m31100| 2015-07-09T14:14:30.617-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.618-0400 m31101| 2015-07-09T14:14:30.618-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.619-0400 m31201| 2015-07-09T14:14:30.618-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.619-0400 m31202| 2015-07-09T14:14:30.619-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.620-0400 m31101| 2015-07-09T14:14:30.619-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.621-0400 m31201| 2015-07-09T14:14:30.620-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.621-0400 m31102| 2015-07-09T14:14:30.620-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.621-0400 m31202| 2015-07-09T14:14:30.620-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.621-0400 m31101| 2015-07-09T14:14:30.621-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.622-0400 m31201| 2015-07-09T14:14:30.622-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.622-0400 m31102| 2015-07-09T14:14:30.622-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.623-0400 m31101| 2015-07-09T14:14:30.623-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.623-0400 m31202| 2015-07-09T14:14:30.623-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.623-0400 m31102| 2015-07-09T14:14:30.623-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.624-0400 m31201| 2015-07-09T14:14:30.623-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.624-0400 m31101| 2015-07-09T14:14:30.624-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.624-0400 m31202| 2015-07-09T14:14:30.624-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.625-0400 m31102| 2015-07-09T14:14:30.625-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.625-0400 m31101| 2015-07-09T14:14:30.625-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.626-0400 m31201| 2015-07-09T14:14:30.625-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.626-0400 m31102| 2015-07-09T14:14:30.626-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.716-0400 m31200| 2015-07-09T14:14:30.714-0400 I INDEX [conn35] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.716-0400 m31200| 2015-07-09T14:14:30.714-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.716-0400 m31100| 2015-07-09T14:14:30.714-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.717-0400 m31100| 2015-07-09T14:14:30.714-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.718-0400 m31200| 2015-07-09T14:14:30.717-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs
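The dropIndexes storm above is the workload tearing down the foo0_1..foo9_1 indexes, fanned out to both shard primaries and replayed by every secondary; the builds that resume immediately afterwards are the next FSM state re-creating them. One such drop, sketched from the shell with the index name taken from the earlier builds:

    // Equivalent to the logged "CMD: dropIndexes db49.coll49" entries.
    db.getSiblingDB("db49").runCommand({ dropIndexes: "coll49", index: "foo2_1" });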
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.726-0400 m31200| 2015-07-09T14:14:30.726-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.726-0400 m31200| 2015-07-09T14:14:30.726-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.732-0400 m31100| 2015-07-09T14:14:30.732-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.738-0400 m31202| 2015-07-09T14:14:30.734-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.738-0400 m31202| 2015-07-09T14:14:30.734-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.740-0400 m31201| 2015-07-09T14:14:30.739-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.740-0400 m31201| 2015-07-09T14:14:30.740-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.750-0400 m31200| 2015-07-09T14:14:30.749-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.750-0400 m31202| 2015-07-09T14:14:30.750-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.752-0400 m31100| 2015-07-09T14:14:30.750-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.752-0400 m31100| 2015-07-09T14:14:30.750-0400 I INDEX [conn45] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.757-0400 m31102| 2015-07-09T14:14:30.756-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.758-0400 m31102| 2015-07-09T14:14:30.756-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.759-0400 m31101| 2015-07-09T14:14:30.758-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.759-0400 m31101| 2015-07-09T14:14:30.758-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.762-0400 m31201| 2015-07-09T14:14:30.761-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.769-0400 m31100| 2015-07-09T14:14:30.767-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.769-0400 m31200| 2015-07-09T14:14:30.767-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.770-0400 m31200| 2015-07-09T14:14:30.767-0400 I INDEX [conn137] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.774-0400 m31201| 2015-07-09T14:14:30.773-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.774-0400 m31202| 2015-07-09T14:14:30.773-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.775-0400 m31102| 2015-07-09T14:14:30.773-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.775-0400 m31201| 2015-07-09T14:14:30.773-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.775-0400 m31202| 2015-07-09T14:14:30.773-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.781-0400 m31101| 2015-07-09T14:14:30.780-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.781-0400 m31100| 2015-07-09T14:14:30.780-0400 I INDEX [conn20] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.783-0400 m31100| 2015-07-09T14:14:30.781-0400 I INDEX [conn20] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.788-0400 m31202| 2015-07-09T14:14:30.787-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.788-0400 m31200| 2015-07-09T14:14:30.788-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.793-0400 m31102| 2015-07-09T14:14:30.791-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.794-0400 m31102| 2015-07-09T14:14:30.791-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.794-0400 m31201| 2015-07-09T14:14:30.791-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.795-0400 m31100| 2015-07-09T14:14:30.794-0400 I INDEX [conn20] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.796-0400 m31101| 2015-07-09T14:14:30.795-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.796-0400 m31101| 2015-07-09T14:14:30.796-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.800-0400 m31200| 2015-07-09T14:14:30.799-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.800-0400 m31200| 2015-07-09T14:14:30.799-0400 I INDEX [conn30] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.804-0400 m31202| 2015-07-09T14:14:30.802-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.805-0400 m31202| 2015-07-09T14:14:30.802-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.815-0400 m31101| 2015-07-09T14:14:30.815-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.815-0400 m31102| 2015-07-09T14:14:30.815-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.816-0400 m31200| 2015-07-09T14:14:30.815-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.816-0400 m31202| 2015-07-09T14:14:30.815-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.816-0400 m31100| 2015-07-09T14:14:30.816-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.817-0400 m31100| 2015-07-09T14:14:30.816-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.818-0400 m31201| 2015-07-09T14:14:30.818-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.819-0400 m31201| 2015-07-09T14:14:30.818-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.819-0400 m31200| 2015-07-09T14:14:30.818-0400 I COMMAND [conn30] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 78917 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 106ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.821-0400 m31200| 2015-07-09T14:14:30.820-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.821-0400 m31200| 2015-07-09T14:14:30.820-0400 I INDEX [conn80] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.826-0400 m31102| 2015-07-09T14:14:30.824-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.826-0400 m31102| 2015-07-09T14:14:30.824-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.837-0400 m31100| 2015-07-09T14:14:30.835-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.837-0400 m31101| 2015-07-09T14:14:30.835-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.837-0400 m31101| 2015-07-09T14:14:30.835-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.839-0400 m31100| 2015-07-09T14:14:30.836-0400 I COMMAND [conn60] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 82287 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.842-0400 m31201| 2015-07-09T14:14:30.841-0400 I INDEX [repl writer worker 15] build index done. 
scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.843-0400 m31202| 2015-07-09T14:14:30.842-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.843-0400 m31202| 2015-07-09T14:14:30.842-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.847-0400 m31102| 2015-07-09T14:14:30.846-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.847-0400 m31101| 2015-07-09T14:14:30.846-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.849-0400 m31100| 2015-07-09T14:14:30.846-0400 I INDEX [conn54] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.849-0400 m31100| 2015-07-09T14:14:30.846-0400 I INDEX [conn54] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.852-0400 m31202| 2015-07-09T14:14:30.850-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.852-0400 m31200| 2015-07-09T14:14:30.850-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.853-0400 m31200| 2015-07-09T14:14:30.851-0400 I COMMAND [conn80] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 101864 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.854-0400 m31201| 2015-07-09T14:14:30.854-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.855-0400 m31201| 2015-07-09T14:14:30.854-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.856-0400 m31100| 2015-07-09T14:14:30.854-0400 I INDEX [conn54] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.858-0400 m31100| 2015-07-09T14:14:30.856-0400 I COMMAND [conn54] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 120809 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.859-0400 m31102| 2015-07-09T14:14:30.859-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.859-0400 m31102| 2015-07-09T14:14:30.859-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.860-0400 m31101| 2015-07-09T14:14:30.859-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.860-0400 m31101| 2015-07-09T14:14:30.859-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.862-0400 m31200| 2015-07-09T14:14:30.862-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.862-0400 m31200| 2015-07-09T14:14:30.862-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.873-0400 m31202| 2015-07-09T14:14:30.870-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.873-0400 m31202| 2015-07-09T14:14:30.871-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.875-0400 m31102| 2015-07-09T14:14:30.874-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.876-0400 m31201| 2015-07-09T14:14:30.876-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.877-0400 m31100| 2015-07-09T14:14:30.876-0400 I INDEX [conn48] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.877-0400 m31100| 2015-07-09T14:14:30.876-0400 I INDEX [conn48] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.881-0400 m31101| 2015-07-09T14:14:30.881-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.891-0400 m31200| 2015-07-09T14:14:30.890-0400 I INDEX [conn60] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.892-0400 m31200| 2015-07-09T14:14:30.891-0400 I COMMAND [conn60] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 132223 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.900-0400 m31202| 2015-07-09T14:14:30.896-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.900-0400 m31102| 2015-07-09T14:14:30.896-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.900-0400 m31102| 2015-07-09T14:14:30.896-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.901-0400 m31100| 2015-07-09T14:14:30.896-0400 I INDEX [conn48] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.901-0400 m31100| 2015-07-09T14:14:30.897-0400 I COMMAND [conn48] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 137688 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.901-0400 m31201| 2015-07-09T14:14:30.900-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.902-0400 m31201| 2015-07-09T14:14:30.900-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.906-0400 m31200| 2015-07-09T14:14:30.906-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.906-0400 m31200| 2015-07-09T14:14:30.906-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:30.913-0400 m31102| 2015-07-09T14:14:30.913-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.191-0400 m31202| 2015-07-09T14:14:30.918-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.192-0400 m31202| 2015-07-09T14:14:30.918-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.192-0400 m31100| 2015-07-09T14:14:30.918-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.192-0400 m31100| 2015-07-09T14:14:30.918-0400 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.192-0400 m31101| 2015-07-09T14:14:30.918-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.192-0400 m31101| 2015-07-09T14:14:30.918-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.193-0400 m31201| 2015-07-09T14:14:30.922-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.193-0400 m31200| 2015-07-09T14:14:30.922-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.193-0400 m31200| 2015-07-09T14:14:30.923-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 171042 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 202ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.193-0400 m31100| 2015-07-09T14:14:30.929-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.194-0400 m31100| 2015-07-09T14:14:30.930-0400 I COMMAND [conn58] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 177055 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 209ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.194-0400 m31202| 2015-07-09T14:14:30.941-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.194-0400 m31102| 2015-07-09T14:14:30.941-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.194-0400 m31102| 2015-07-09T14:14:30.941-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.195-0400 m31101| 2015-07-09T14:14:30.942-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.195-0400 m31200| 2015-07-09T14:14:30.946-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.195-0400 m31200| 2015-07-09T14:14:30.946-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.195-0400 m31201| 2015-07-09T14:14:30.946-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.196-0400 m31201| 2015-07-09T14:14:30.946-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.196-0400 m31100| 2015-07-09T14:14:30.955-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.196-0400 m31100| 2015-07-09T14:14:30.955-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.197-0400 m31202| 2015-07-09T14:14:30.956-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.197-0400 m31202| 2015-07-09T14:14:30.956-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.199-0400 m31101| 2015-07-09T14:14:30.958-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.199-0400 m31101| 2015-07-09T14:14:30.958-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.199-0400 m31102| 2015-07-09T14:14:30.966-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.199-0400 m31201| 2015-07-09T14:14:30.971-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.200-0400 m31202| 2015-07-09T14:14:30.971-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.200-0400 m31200| 2015-07-09T14:14:30.976-0400 I INDEX [conn81] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.201-0400 m31200| 2015-07-09T14:14:30.977-0400 I COMMAND [conn81] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 202052 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 256ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.201-0400 m31100| 2015-07-09T14:14:30.980-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.202-0400 m31101| 2015-07-09T14:14:30.982-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.203-0400 m31100| 2015-07-09T14:14:30.983-0400 I COMMAND [conn57] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 208918 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 262ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.203-0400 m31102| 2015-07-09T14:14:30.990-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.203-0400 m31102| 2015-07-09T14:14:30.990-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.204-0400 m31200| 2015-07-09T14:14:30.991-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.204-0400 m31200| 2015-07-09T14:14:30.991-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.205-0400 m31202| 2015-07-09T14:14:30.995-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.205-0400 m31202| 2015-07-09T14:14:30.995-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.206-0400 m31201| 2015-07-09T14:14:30.994-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.206-0400 m31201| 2015-07-09T14:14:30.994-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.206-0400 m31101| 2015-07-09T14:14:30.997-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.207-0400 m31101| 2015-07-09T14:14:30.997-0400 I INDEX [repl 
writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.207-0400 m31101| 2015-07-09T14:14:31.012-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.207-0400 m31201| 2015-07-09T14:14:31.013-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.207-0400 m31102| 2015-07-09T14:14:31.013-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.207-0400 m31100| 2015-07-09T14:14:31.013-0400 I INDEX [conn46] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.207-0400 m31100| 2015-07-09T14:14:31.013-0400 I INDEX [conn46] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.208-0400 m31200| 2015-07-09T14:14:31.018-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.208-0400 m31200| 2015-07-09T14:14:31.019-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 253650 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 295ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.208-0400 m31202| 2015-07-09T14:14:31.021-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.209-0400 m31102| 2015-07-09T14:14:31.026-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.209-0400 m31102| 2015-07-09T14:14:31.026-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.209-0400 m31100| 2015-07-09T14:14:31.028-0400 I INDEX [conn46] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.210-0400 m31100| 2015-07-09T14:14:31.029-0400 I COMMAND [conn46] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 260144 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 306ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.211-0400 m31201| 2015-07-09T14:14:31.033-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.211-0400 m31201| 2015-07-09T14:14:31.033-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.211-0400 m31200| 2015-07-09T14:14:31.033-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.211-0400 m31200| 2015-07-09T14:14:31.034-0400 I INDEX [conn28] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.212-0400 m31101| 2015-07-09T14:14:31.041-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.212-0400 m31202| 2015-07-09T14:14:31.041-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.212-0400 m31101| 2015-07-09T14:14:31.041-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.212-0400 m31202| 2015-07-09T14:14:31.041-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.212-0400 m31102| 2015-07-09T14:14:31.047-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.212-0400 m31100| 2015-07-09T14:14:31.048-0400 I INDEX [conn72] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.212-0400 m31100| 2015-07-09T14:14:31.048-0400 I INDEX [conn72] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.213-0400 m31200| 2015-07-09T14:14:31.053-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.213-0400 m31201| 2015-07-09T14:14:31.053-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.213-0400 m31200| 2015-07-09T14:14:31.054-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.214-0400 m31200| 2015-07-09T14:14:31.054-0400 I COMMAND [conn28] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 293806 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 329ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.214-0400 m31101| 2015-07-09T14:14:31.055-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.214-0400 m31202| 2015-07-09T14:14:31.060-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.214-0400 m31200| 2015-07-09T14:14:31.060-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.215-0400 m31200| 2015-07-09T14:14:31.060-0400 I COMMAND [conn84] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo6: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 295395 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 301ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.216-0400 m31200| 2015-07-09T14:14:31.061-0400 I COMMAND [conn64] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 288851 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 289ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.217-0400 m31200| 2015-07-09T14:14:31.062-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.218-0400 m31201| 2015-07-09T14:14:31.062-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.219-0400 m31201| 2015-07-09T14:14:31.062-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.219-0400 m31201| 2015-07-09T14:14:31.072-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.219-0400 m31100| 2015-07-09T14:14:31.074-0400 I INDEX [conn72] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.220-0400 m31200| 2015-07-09T14:14:31.072-0400 I COMMAND [conn34] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo5: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 127862 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.220-0400 m31200| 2015-07-09T14:14:31.072-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.221-0400 m31100| 2015-07-09T14:14:31.076-0400 I COMMAND [conn72] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 303976 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 350ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.222-0400 m31100| 2015-07-09T14:14:31.076-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.222-0400 m31100| 2015-07-09T14:14:31.080-0400 I COMMAND [conn34] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo6: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 316030 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 320ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.223-0400 m31100| 2015-07-09T14:14:31.080-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.223-0400 m31101| 2015-07-09T14:14:31.080-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.223-0400 m31101| 2015-07-09T14:14:31.081-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.223-0400 m31102| 2015-07-09T14:14:31.080-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.224-0400 m31102| 2015-07-09T14:14:31.080-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.225-0400 m31202| 2015-07-09T14:14:31.082-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.226-0400 m31202| 2015-07-09T14:14:31.082-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.226-0400 m31100| 2015-07-09T14:14:31.084-0400 I COMMAND [conn15] command db49.coll49 command: 
dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 308580 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 312ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.227-0400 m31100| 2015-07-09T14:14:31.084-0400 I COMMAND [conn20] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version epoch mismatch detected for db49.coll49, the collection may have been dropped and recreated ( ns : db49.coll49, received : 0|0||000000000000000000000000, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:391 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 285598 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 285ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.228-0400 m31100| 2015-07-09T14:14:31.084-0400 I COMMAND [conn60] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version epoch mismatch detected for db49.coll49, the collection may have been dropped and recreated ( ns : db49.coll49, received : 0|0||000000000000000000000000, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:391 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 242847 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 242ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.228-0400 m30999| 2015-07-09T14:14:31.085-0400 I SHARDING [conn306] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.228-0400 m30999| 2015-07-09T14:14:31.085-0400 I SHARDING [conn306] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.230-0400 m30998| 2015-07-09T14:14:31.085-0400 I SHARDING [conn305] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.230-0400 m30998| 2015-07-09T14:14:31.085-0400 I SHARDING [conn305] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.231-0400 m30998| 2015-07-09T14:14:31.086-0400 I SHARDING [conn306] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.231-0400 m30998| 2015-07-09T14:14:31.086-0400 I SHARDING [conn306] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.232-0400 m30998| 2015-07-09T14:14:31.086-0400 I SHARDING [conn304] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:31.232-0400 m30998| 2015-07-09T14:14:31.086-0400 I SHARDING [conn302] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.232-0400 m30998| 2015-07-09T14:14:31.087-0400 I SHARDING [conn304] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.233-0400 m30999| 2015-07-09T14:14:31.087-0400 I SHARDING [conn304] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.233-0400 m30999| 2015-07-09T14:14:31.087-0400 I SHARDING [conn304] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.234-0400 m31100| 2015-07-09T14:14:31.084-0400 I COMMAND [conn48] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version epoch mismatch detected for db49.coll49, the collection may have been dropped and recreated ( ns : db49.coll49, received : 0|0||000000000000000000000000, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:391 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 183493 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 183ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.236-0400 m31100| 2015-07-09T14:14:31.084-0400 I COMMAND [conn54] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version epoch mismatch detected for db49.coll49, the collection may have been dropped and recreated ( ns : db49.coll49, received : 0|0||000000000000000000000000, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:391 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 225781 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 225ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.237-0400 m31100| 2015-07-09T14:14:31.085-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.237-0400 m31100| 2015-07-09T14:14:31.086-0400 I NETWORK [conn72] end connection 127.0.0.1:62808 (109 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.238-0400 m31100| 2015-07-09T14:14:31.086-0400 I NETWORK [conn46] end connection 127.0.0.1:62663 (109 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.238-0400 m31100| 2015-07-09T14:14:31.087-0400 I NETWORK [conn54] end connection 127.0.0.1:62744 (107 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.238-0400 m31100| 2015-07-09T14:14:31.087-0400 I NETWORK [conn20] end connection 127.0.0.1:62589 (106 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.240-0400 m30998| 2015-07-09T14:14:31.088-0400 I SHARDING [conn302] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.240-0400 m31100| 
2015-07-09T14:14:31.088-0400 I NETWORK [conn48] end connection 127.0.0.1:62665 (105 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.241-0400 m31201| 2015-07-09T14:14:31.090-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.241-0400 m31201| 2015-07-09T14:14:31.090-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.241-0400 m31100| 2015-07-09T14:14:31.089-0400 I NETWORK [conn60] end connection 127.0.0.1:62755 (104 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.242-0400 m31202| 2015-07-09T14:14:31.096-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.242-0400 m31100| 2015-07-09T14:14:31.096-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.243-0400 m31100| 2015-07-09T14:14:31.096-0400 I COMMAND [conn132] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo5: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151021 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.243-0400 m31102| 2015-07-09T14:14:31.096-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.243-0400 m31100| 2015-07-09T14:14:31.097-0400 I COMMAND [conn38] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo4: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 108294 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.243-0400 m31101| 2015-07-09T14:14:31.101-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.243-0400 m31200| 2015-07-09T14:14:31.102-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.243-0400 m31100| 2015-07-09T14:14:31.103-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.244-0400 m31202| 2015-07-09T14:14:31.103-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.244-0400 m31200| 2015-07-09T14:14:31.105-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.244-0400 m31201| 2015-07-09T14:14:31.107-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.246-0400 m31202| 2015-07-09T14:14:31.107-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.246-0400 m31201| 2015-07-09T14:14:31.108-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.246-0400 m31200| 2015-07-09T14:14:31.112-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.246-0400 m31100| 2015-07-09T14:14:31.112-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.246-0400 m31202| 2015-07-09T14:14:31.113-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.247-0400 m31201| 2015-07-09T14:14:31.114-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.247-0400 m31100| 2015-07-09T14:14:31.115-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.247-0400 m31100| 2015-07-09T14:14:31.115-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.247-0400 m31101| 2015-07-09T14:14:31.113-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.248-0400 m31101| 2015-07-09T14:14:31.114-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.248-0400 m31102| 2015-07-09T14:14:31.116-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.249-0400 m31102| 2015-07-09T14:14:31.116-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.249-0400 m31200| 2015-07-09T14:14:31.116-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.249-0400 m31100| 2015-07-09T14:14:31.117-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.250-0400 m31201| 2015-07-09T14:14:31.118-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.250-0400 m31202| 2015-07-09T14:14:31.121-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.250-0400 m31200| 2015-07-09T14:14:31.122-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.250-0400 m31100| 2015-07-09T14:14:31.122-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.251-0400 m31201| 2015-07-09T14:14:31.123-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.252-0400 m31202| 2015-07-09T14:14:31.123-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.252-0400 m31200| 2015-07-09T14:14:31.123-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 
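The churn above is the concurrency workload's create/drop cycle on db49.coll49: worker threads fan createIndexes and dropIndexes commands through the mongos routers, the shard primaries m31100 and m31200 execute them, and the "[repl writer worker N]" entries on m31101/m31102 and m31201/m31202 are the secondaries of each shard replaying the same DDL from the oplog. Note that every build scans 0 records, and the 100-350 ms command times reported in the COMMAND entries are almost entirely timeAcquiringMicros on the database-exclusive lock, i.e. queueing behind the other index DDL rather than actual build work. A minimal shell sketch of the command shapes seen in this log (database, collection, and key names taken from the log; the helper-to-command mapping is illustrative, not the test's actual code):

  var coll = db.getSiblingDB("db49").coll49;
  coll.createIndex({ foo6: 1 });  // logged as createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] }
  coll.dropIndex({ foo6: 1 });    // logged as dropIndexes { deleteIndexes: "coll49", index: { foo6: 1.0 } }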
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.253-0400 m31101| 2015-07-09T14:14:31.125-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.253-0400 m31102| 2015-07-09T14:14:31.126-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.253-0400 m31101| 2015-07-09T14:14:31.126-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.254-0400 m31201| 2015-07-09T14:14:31.126-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.254-0400 m31202| 2015-07-09T14:14:31.127-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.254-0400 m31102| 2015-07-09T14:14:31.127-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.254-0400 m31201| 2015-07-09T14:14:31.128-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.254-0400 m31102| 2015-07-09T14:14:31.128-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.255-0400 m31101| 2015-07-09T14:14:31.128-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.256-0400 m31202| 2015-07-09T14:14:31.128-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.256-0400 m31201| 2015-07-09T14:14:31.129-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.257-0400 m31101| 2015-07-09T14:14:31.129-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.257-0400 m31202| 2015-07-09T14:14:31.130-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.257-0400 m31102| 2015-07-09T14:14:31.130-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.257-0400 m31201| 2015-07-09T14:14:31.131-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.257-0400 m31102| 2015-07-09T14:14:31.131-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.257-0400 m31101| 2015-07-09T14:14:31.131-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.257-0400 m31202| 2015-07-09T14:14:31.132-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.258-0400 m31201| 2015-07-09T14:14:31.132-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.258-0400 m31102| 2015-07-09T14:14:31.132-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.258-0400 m31202| 2015-07-09T14:14:31.132-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 
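The listIndexes failures a few entries back (code 13388, "shard version not ok: version epoch mismatch") are an expected side effect of this workload: a concurrent thread dropped and recreated db49.coll49, so the version epoch cached for the connection no longer matched, mongod rejected the request, and each mongos discarded the tainted pooled connection ("not being returned to the pool") before transparently retrying. The retried command appears verbatim in the log; issued from the shell it would look like the following (illustrative reconstruction, batchSize as logged):

  db.getSiblingDB("db49").runCommand({ listIndexes: "coll49", cursor: { batchSize: 2 } });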
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.258-0400 m31101| 2015-07-09T14:14:31.133-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.258-0400 m31201| 2015-07-09T14:14:31.133-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.259-0400 m31102| 2015-07-09T14:14:31.133-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.259-0400 m31102| 2015-07-09T14:14:31.134-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.259-0400 m31101| 2015-07-09T14:14:31.135-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.260-0400 m31102| 2015-07-09T14:14:31.135-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.260-0400 m31101| 2015-07-09T14:14:31.136-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.261-0400 m31102| 2015-07-09T14:14:31.136-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.261-0400 m31101| 2015-07-09T14:14:31.137-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.261-0400 m31102| 2015-07-09T14:14:31.137-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.261-0400 m31101| 2015-07-09T14:14:31.138-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.262-0400 m31101| 2015-07-09T14:14:31.138-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.262-0400 m31200| 2015-07-09T14:14:31.189-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.262-0400 m31200| 2015-07-09T14:14:31.189-0400 I INDEX [conn81] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.262-0400 m31100| 2015-07-09T14:14:31.197-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.262-0400 m31100| 2015-07-09T14:14:31.197-0400 I INDEX [conn57] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.263-0400 m31200| 2015-07-09T14:14:31.197-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.263-0400 m31100| 2015-07-09T14:14:31.204-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.265-0400 m31200| 2015-07-09T14:14:31.204-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.265-0400 m31200| 2015-07-09T14:14:31.204-0400 I INDEX [conn30] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.265-0400 m31202| 2015-07-09T14:14:31.209-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.266-0400 m31202| 2015-07-09T14:14:31.209-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.266-0400 m31201| 2015-07-09T14:14:31.209-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.267-0400 m31201| 2015-07-09T14:14:31.209-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.267-0400 m31200| 2015-07-09T14:14:31.215-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.268-0400 m31100| 2015-07-09T14:14:31.221-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.268-0400 m31100| 2015-07-09T14:14:31.221-0400 I INDEX [conn45] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.269-0400 m31202| 2015-07-09T14:14:31.223-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.270-0400 m31101| 2015-07-09T14:14:31.223-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.270-0400 m31101| 2015-07-09T14:14:31.223-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.270-0400 m31102| 2015-07-09T14:14:31.224-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.271-0400 m31102| 2015-07-09T14:14:31.224-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.271-0400 m31201| 2015-07-09T14:14:31.227-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.271-0400 m31200| 2015-07-09T14:14:31.228-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.271-0400 m31200| 2015-07-09T14:14:31.228-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.272-0400 m31101| 2015-07-09T14:14:31.235-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.272-0400 m31100| 2015-07-09T14:14:31.235-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.272-0400 m31102| 2015-07-09T14:14:31.235-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.272-0400 m31200| 2015-07-09T14:14:31.236-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.275-0400 m31202| 2015-07-09T14:14:31.235-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.275-0400 m31202| 2015-07-09T14:14:31.235-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.275-0400 m31201| 2015-07-09T14:14:31.240-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.275-0400 m31201| 2015-07-09T14:14:31.240-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.275-0400 m31100| 2015-07-09T14:14:31.245-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.276-0400 m31100| 2015-07-09T14:14:31.245-0400 I INDEX [conn50] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.276-0400 m31202| 2015-07-09T14:14:31.246-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.277-0400 m31200| 2015-07-09T14:14:31.248-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.277-0400 m31200| 2015-07-09T14:14:31.248-0400 I INDEX [conn137] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.277-0400 m31102| 2015-07-09T14:14:31.251-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.278-0400 m31102| 2015-07-09T14:14:31.251-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.278-0400 m31100| 2015-07-09T14:14:31.251-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.278-0400 m31101| 2015-07-09T14:14:31.253-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.278-0400 m31201| 2015-07-09T14:14:31.252-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.280-0400 m31101| 2015-07-09T14:14:31.253-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.280-0400 m31200| 2015-07-09T14:14:31.257-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.283-0400 m31202| 2015-07-09T14:14:31.260-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.283-0400 m31202| 2015-07-09T14:14:31.260-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.284-0400 m31100| 2015-07-09T14:14:31.263-0400 I INDEX [conn51] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.284-0400 m31100| 2015-07-09T14:14:31.263-0400 I INDEX [conn51] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.284-0400 m31201| 2015-07-09T14:14:31.269-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.285-0400 m31201| 2015-07-09T14:14:31.269-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.286-0400 m31101| 2015-07-09T14:14:31.268-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.286-0400 m31200| 2015-07-09T14:14:31.269-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.287-0400 m31200| 2015-07-09T14:14:31.269-0400 I INDEX [conn60] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.287-0400 m31102| 2015-07-09T14:14:31.272-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.289-0400 m31202| 2015-07-09T14:14:31.276-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.289-0400 m31100| 2015-07-09T14:14:31.276-0400 I INDEX [conn51] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.290-0400 m31101| 2015-07-09T14:14:31.281-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.290-0400 m31101| 2015-07-09T14:14:31.281-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.291-0400 m31200| 2015-07-09T14:14:31.287-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.299-0400 m31102| 2015-07-09T14:14:31.298-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.301-0400 m31102| 2015-07-09T14:14:31.298-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.301-0400 m31201| 2015-07-09T14:14:31.297-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.302-0400 m31202| 2015-07-09T14:14:31.298-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.303-0400 m31202| 2015-07-09T14:14:31.298-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.303-0400 m31100| 2015-07-09T14:14:31.300-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.303-0400 m31100| 2015-07-09T14:14:31.300-0400 I INDEX [conn73] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.305-0400 m31101| 2015-07-09T14:14:31.305-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.310-0400 m31200| 2015-07-09T14:14:31.309-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.310-0400 m31200| 2015-07-09T14:14:31.309-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.312-0400 m31102| 2015-07-09T14:14:31.311-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.315-0400 m31100| 2015-07-09T14:14:31.312-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.316-0400 m31202| 2015-07-09T14:14:31.315-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.321-0400 m31101| 2015-07-09T14:14:31.320-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.321-0400 m31101| 2015-07-09T14:14:31.320-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.324-0400 m31100| 2015-07-09T14:14:31.324-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.324-0400 m31100| 2015-07-09T14:14:31.324-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.325-0400 m31102| 2015-07-09T14:14:31.324-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.325-0400 m31102| 2015-07-09T14:14:31.324-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.326-0400 m31201| 2015-07-09T14:14:31.325-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.327-0400 m31201| 2015-07-09T14:14:31.325-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.327-0400 m31202| 2015-07-09T14:14:31.325-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.329-0400 m31202| 2015-07-09T14:14:31.325-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.329-0400 m31200| 2015-07-09T14:14:31.327-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.330-0400 m31200| 2015-07-09T14:14:31.328-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 71285 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.333-0400 m31202| 2015-07-09T14:14:31.332-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.337-0400 m31102| 2015-07-09T14:14:31.335-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.342-0400 m31201| 2015-07-09T14:14:31.341-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.343-0400 m31100| 2015-07-09T14:14:31.342-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.345-0400 m31100| 2015-07-09T14:14:31.344-0400 I COMMAND [conn49] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 95665 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 126ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.349-0400 m31101| 2015-07-09T14:14:31.348-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.351-0400 m31200| 2015-07-09T14:14:31.350-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.352-0400 m31200| 2015-07-09T14:14:31.350-0400 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.352-0400 m31202| 2015-07-09T14:14:31.350-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.352-0400 m31202| 2015-07-09T14:14:31.350-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.354-0400 m31102| 2015-07-09T14:14:31.353-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.355-0400 m31102| 2015-07-09T14:14:31.353-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.361-0400 m31202| 2015-07-09T14:14:31.360-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.362-0400 m31100| 2015-07-09T14:14:31.361-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.362-0400 m31100| 2015-07-09T14:14:31.361-0400 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.363-0400 m31201| 2015-07-09T14:14:31.360-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.363-0400 m31201| 2015-07-09T14:14:31.360-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.375-0400 m31101| 2015-07-09T14:14:31.373-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.375-0400 m31101| 2015-07-09T14:14:31.373-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.375-0400 m31100| 2015-07-09T14:14:31.373-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
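The slow createIndexes entries above (110ms on m31200/conn52, 126ms on m31100/conn49) record the exact command document the workload sends to each shard primary; nearly all of the elapsed time is spent waiting on the database's exclusive metadata lock (the timeAcquiringMicros W figure), while the build itself scans 0 records. A minimal shell sketch of the same command shape, assuming a db handle for db49 obtained through the mongos (illustrative only, not taken from the workload source):

    // Same command document as logged above; db.runCommand() is the
    // generic shell entry point for database commands.
    var res = db.runCommand({
        createIndexes: "coll49",
        indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ]
    });
    printjson(res);  // expect ok: 1 once the exclusive metadata lock is acquired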
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.376-0400 m31200| 2015-07-09T14:14:31.373-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.376-0400 m31200| 2015-07-09T14:14:31.374-0400 I COMMAND [conn28] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 108721 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 155ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.377-0400 m31100| 2015-07-09T14:14:31.375-0400 I COMMAND [conn58] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 124547 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 156ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.383-0400 m31102| 2015-07-09T14:14:31.382-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.383-0400 m31201| 2015-07-09T14:14:31.382-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.395-0400 m31101| 2015-07-09T14:14:31.394-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.395-0400 m31202| 2015-07-09T14:14:31.394-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.395-0400 m31202| 2015-07-09T14:14:31.395-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.396-0400 m31200| 2015-07-09T14:14:31.394-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.396-0400 m31200| 2015-07-09T14:14:31.394-0400 I INDEX [conn80] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.396-0400 m31100| 2015-07-09T14:14:31.395-0400 I INDEX [conn56] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.396-0400 m31100| 2015-07-09T14:14:31.395-0400 I INDEX [conn56] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.401-0400 m31201| 2015-07-09T14:14:31.400-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.401-0400 m31201| 2015-07-09T14:14:31.400-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.402-0400 m31102| 2015-07-09T14:14:31.401-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.403-0400 m31102| 2015-07-09T14:14:31.401-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.408-0400 m31100| 2015-07-09T14:14:31.407-0400 I INDEX [conn56] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.409-0400 m31200| 2015-07-09T14:14:31.407-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.409-0400 m31100| 2015-07-09T14:14:31.408-0400 I COMMAND [conn56] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 150827 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 182ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.411-0400 m31200| 2015-07-09T14:14:31.408-0400 I COMMAND [conn80] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148891 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 182ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.423-0400 m31201| 2015-07-09T14:14:31.421-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.424-0400 m31202| 2015-07-09T14:14:31.421-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.425-0400 m31101| 2015-07-09T14:14:31.423-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.425-0400 m31101| 2015-07-09T14:14:31.423-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.435-0400 m31200| 2015-07-09T14:14:31.434-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.435-0400 m31200| 2015-07-09T14:14:31.434-0400 I INDEX [conn81] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.435-0400 m31100| 2015-07-09T14:14:31.434-0400 I INDEX [conn71] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.436-0400 m31100| 2015-07-09T14:14:31.434-0400 I INDEX [conn71] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.442-0400 m31102| 2015-07-09T14:14:31.441-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.442-0400 m31201| 2015-07-09T14:14:31.441-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.442-0400 m31201| 2015-07-09T14:14:31.441-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.451-0400 m31200| 2015-07-09T14:14:31.450-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.453-0400 m31202| 2015-07-09T14:14:31.451-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.453-0400 m31202| 2015-07-09T14:14:31.451-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.454-0400 m31200| 2015-07-09T14:14:31.451-0400 I COMMAND [conn81] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 182647 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 225ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.459-0400 m31101| 2015-07-09T14:14:31.459-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.459-0400 m31100| 2015-07-09T14:14:31.459-0400 I INDEX [conn71] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.461-0400 m31100| 2015-07-09T14:14:31.460-0400 I COMMAND [conn71] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 182465 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 234ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.462-0400 m31102| 2015-07-09T14:14:31.461-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.462-0400 m31102| 2015-07-09T14:14:31.461-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.473-0400 m31100| 2015-07-09T14:14:31.472-0400 I INDEX [conn151] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.474-0400 m31100| 2015-07-09T14:14:31.473-0400 I INDEX [conn151] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.474-0400 m31200| 2015-07-09T14:14:31.473-0400 I INDEX [conn35] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.475-0400 m31200| 2015-07-09T14:14:31.473-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.475-0400 m31201| 2015-07-09T14:14:31.474-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.477-0400 m31202| 2015-07-09T14:14:31.477-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.477-0400 m31101| 2015-07-09T14:14:31.477-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.478-0400 m31101| 2015-07-09T14:14:31.477-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.485-0400 m31100| 2015-07-09T14:14:31.485-0400 I INDEX [conn151] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.486-0400 m31102| 2015-07-09T14:14:31.485-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.488-0400 m31100| 2015-07-09T14:14:31.486-0400 I COMMAND [conn151] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 232260 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 258ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.493-0400 m31100| 2015-07-09T14:14:31.486-0400 I COMMAND [conn51] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version mismatch detected for db49.coll49, stored major version 2 does not match received 1 ( ns : db49.coll49, received : 1|1||559eba05ca4787b9985d1e00, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:383 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 205013 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 205ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.495-0400 m31100| 2015-07-09T14:14:31.486-0400 I COMMAND [conn58] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 109480 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.495-0400 m30999| 2015-07-09T14:14:31.488-0400 I SHARDING [conn303] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.496-0400 m30999| 2015-07-09T14:14:31.488-0400 I SHARDING [conn303] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.496-0400 m31100| 2015-07-09T14:14:31.486-0400 I COMMAND [conn45] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 204570 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 205ms
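The conn51 listIndexes above fails with code:13388 ("shard version not ok") because the collection's stored major version moved from 1 to 2 while the command was queued behind the metadata lock, most likely bumped by a concurrent chunk migration; the mongos at m30999 reacts by discarding the pooled connection and transparently retrying the command, so the workload itself never observes the error. A sketch of the same cursor-based call from the shell (illustrative, assuming a db handle through the mongos):

    // listIndexes with the batchSize: 2 cursor option seen in the log.
    // On a stale shard version, mongos refreshes its routing table and
    // retries, which is what the m30999 "retrying command" line records.
    var res = db.runCommand({ listIndexes: "coll49", cursor: { batchSize: 2 } });
    printjson(res.cursor.firstBatch);  // first two index specs; res.cursor.id pages the rest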
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.496-0400 m31100| 2015-07-09T14:14:31.487-0400 I COMMAND [conn50] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 203159 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 203ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.497-0400 m31100| 2015-07-09T14:14:31.487-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.499-0400 m31100| 2015-07-09T14:14:31.487-0400 I COMMAND [conn49] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 141018 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 142ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.499-0400 m31100| 2015-07-09T14:14:31.488-0400 I NETWORK [conn51] end connection 127.0.0.1:62668 (103 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.500-0400 m31200| 2015-07-09T14:14:31.491-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.501-0400 m31100| 2015-07-09T14:14:31.492-0400 I COMMAND [conn15] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 205670 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 209ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.501-0400 m31100| 2015-07-09T14:14:31.492-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.501-0400 m31201| 2015-07-09T14:14:31.492-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.502-0400 m31201| 2015-07-09T14:14:31.492-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.502-0400 m31200| 2015-07-09T14:14:31.492-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.503-0400 m31200| 2015-07-09T14:14:31.492-0400 I COMMAND [conn35] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 223571 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 264ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.503-0400 m31200| 2015-07-09T14:14:31.493-0400 I COMMAND [conn64] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 209849 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 211ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.504-0400 m31100| 2015-07-09T14:14:31.493-0400 I COMMAND [conn36] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo7: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 176275 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 177ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.504-0400 m31100| 2015-07-09T14:14:31.493-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.504-0400 m31200| 2015-07-09T14:14:31.494-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.504-0400 m31100| 2015-07-09T14:14:31.498-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.505-0400 m31102| 2015-07-09T14:14:31.498-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.505-0400 m31102| 2015-07-09T14:14:31.498-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.505-0400 m31202| 2015-07-09T14:14:31.498-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.505-0400 m31202| 2015-07-09T14:14:31.498-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.506-0400 m31100| 2015-07-09T14:14:31.498-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.506-0400 m31200| 2015-07-09T14:14:31.501-0400 I COMMAND [conn65] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo7: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 177567 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 184ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.507-0400 m31201| 2015-07-09T14:14:31.501-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.507-0400 m31101| 2015-07-09T14:14:31.501-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
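The dropIndexes profile entries above show that the server logs the command under its legacy field name, deleteIndexes, and that the target index is addressed by its key pattern rather than by name. A sketch of the same call from the shell (illustrative only):

    // dropIndexes addressed by key pattern, matching the logged
    // { deleteIndexes: "coll49", index: { foo2: 1.0 } } document;
    // the index name "foo2_1" would work equally well here.
    var res = db.runCommand({ dropIndexes: "coll49", index: { foo2: 1.0 } });
    printjson(res);  // { nIndexesWas: ..., ok: 1 } when the index existed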
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.507-0400 m31200| 2015-07-09T14:14:31.503-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.507-0400 m31102| 2015-07-09T14:14:31.505-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.508-0400 m31200| 2015-07-09T14:14:31.505-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.508-0400 m31100| 2015-07-09T14:14:31.505-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.509-0400 m31200| 2015-07-09T14:14:31.509-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.510-0400 m31202| 2015-07-09T14:14:31.509-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.510-0400 m31100| 2015-07-09T14:14:31.510-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.511-0400 m31100| 2015-07-09T14:14:31.511-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.513-0400 m31201| 2015-07-09T14:14:31.513-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.514-0400 m31200| 2015-07-09T14:14:31.513-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.514-0400 m31100| 2015-07-09T14:14:31.513-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.515-0400 m31201| 2015-07-09T14:14:31.513-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.515-0400 m31101| 2015-07-09T14:14:31.513-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.515-0400 m31101| 2015-07-09T14:14:31.513-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.517-0400 m31200| 2015-07-09T14:14:31.517-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.517-0400 m31102| 2015-07-09T14:14:31.517-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.517-0400 m31102| 2015-07-09T14:14:31.517-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.518-0400 m31100| 2015-07-09T14:14:31.517-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.523-0400 m31101| 2015-07-09T14:14:31.523-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.523-0400 m31200| 2015-07-09T14:14:31.522-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.527-0400 m31201| 2015-07-09T14:14:31.526-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.527-0400 m31202| 2015-07-09T14:14:31.526-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.527-0400 m31202| 2015-07-09T14:14:31.527-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.527-0400 m31200| 2015-07-09T14:14:31.526-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.528-0400 m31102| 2015-07-09T14:14:31.528-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.530-0400 m31200| 2015-07-09T14:14:31.529-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.540-0400 m31102| 2015-07-09T14:14:31.539-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.541-0400 m31102| 2015-07-09T14:14:31.539-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.541-0400 m31101| 2015-07-09T14:14:31.540-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.541-0400 m31101| 2015-07-09T14:14:31.540-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.543-0400 m31202| 2015-07-09T14:14:31.543-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.544-0400 m31202| 2015-07-09T14:14:31.543-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.545-0400 m31201| 2015-07-09T14:14:31.545-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.545-0400 m31201| 2015-07-09T14:14:31.545-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.551-0400 m31202| 2015-07-09T14:14:31.550-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.551-0400 m31101| 2015-07-09T14:14:31.550-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.552-0400 m31102| 2015-07-09T14:14:31.552-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.553-0400 m31102| 2015-07-09T14:14:31.552-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.553-0400 m31202| 2015-07-09T14:14:31.553-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.556-0400 m31202| 2015-07-09T14:14:31.555-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.556-0400 m31102| 2015-07-09T14:14:31.555-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.557-0400 m31201| 2015-07-09T14:14:31.556-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.558-0400 m31202| 2015-07-09T14:14:31.557-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.558-0400 m31102| 2015-07-09T14:14:31.557-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.559-0400 m31201| 2015-07-09T14:14:31.559-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.560-0400 m31101| 2015-07-09T14:14:31.559-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.561-0400 m31101| 2015-07-09T14:14:31.559-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.561-0400 m31201| 2015-07-09T14:14:31.560-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.561-0400 m31102| 2015-07-09T14:14:31.561-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.562-0400 m31202| 2015-07-09T14:14:31.562-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.562-0400 m31201| 2015-07-09T14:14:31.562-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.562-0400 m31102| 2015-07-09T14:14:31.562-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.564-0400 m31202| 2015-07-09T14:14:31.563-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.564-0400 m31201| 2015-07-09T14:14:31.563-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.565-0400 m31102| 2015-07-09T14:14:31.565-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.566-0400 m31201| 2015-07-09T14:14:31.565-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.566-0400 m31201| 2015-07-09T14:14:31.566-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.567-0400 m31101| 2015-07-09T14:14:31.567-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.568-0400 m31102| 2015-07-09T14:14:31.567-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.568-0400 m31101| 2015-07-09T14:14:31.567-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.568-0400 m31202| 2015-07-09T14:14:31.568-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.569-0400 m31201| 2015-07-09T14:14:31.568-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.569-0400 m31102| 2015-07-09T14:14:31.568-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.569-0400 m31101| 2015-07-09T14:14:31.569-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.570-0400 m31202| 2015-07-09T14:14:31.569-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.570-0400 m31102| 2015-07-09T14:14:31.570-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.570-0400 m31201| 2015-07-09T14:14:31.570-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.570-0400 m31101| 2015-07-09T14:14:31.570-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.571-0400 m31202| 2015-07-09T14:14:31.571-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.571-0400 m31101| 2015-07-09T14:14:31.571-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.572-0400 m31201| 2015-07-09T14:14:31.572-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.572-0400 m31101| 2015-07-09T14:14:31.572-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.572-0400 m31201| 2015-07-09T14:14:31.572-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.574-0400 m31101| 2015-07-09T14:14:31.573-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.575-0400 m31101| 2015-07-09T14:14:31.574-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.576-0400 m31101| 2015-07-09T14:14:31.576-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.578-0400 m31101| 2015-07-09T14:14:31.577-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.602-0400 m31200| 2015-07-09T14:14:31.601-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.603-0400 m31200| 2015-07-09T14:14:31.601-0400 I INDEX [conn81] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.603-0400 m31100| 2015-07-09T14:14:31.602-0400 I INDEX [conn56] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.604-0400 m31100| 2015-07-09T14:14:31.603-0400 I INDEX [conn56] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.608-0400 m31200| 2015-07-09T14:14:31.608-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.611-0400 m31100| 2015-07-09T14:14:31.611-0400 I INDEX [conn56] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.613-0400 m31102| 2015-07-09T14:14:31.613-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.616-0400 m31200| 2015-07-09T14:14:31.615-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.616-0400 m31200| 2015-07-09T14:14:31.615-0400 I INDEX [conn80] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.616-0400 m31101| 2015-07-09T14:14:31.615-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.620-0400 m31201| 2015-07-09T14:14:31.620-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.621-0400 m31201| 2015-07-09T14:14:31.620-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.622-0400 m31100| 2015-07-09T14:14:31.621-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.622-0400 m31100| 2015-07-09T14:14:31.622-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.629-0400 m31202| 2015-07-09T14:14:31.628-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.629-0400 m31202| 2015-07-09T14:14:31.628-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.630-0400 m31102| 2015-07-09T14:14:31.629-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.630-0400 m31102| 2015-07-09T14:14:31.629-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.634-0400 m31200| 2015-07-09T14:14:31.633-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.634-0400 m31101| 2015-07-09T14:14:31.633-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.634-0400 m31101| 2015-07-09T14:14:31.633-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.642-0400 m31100| 2015-07-09T14:14:31.642-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.643-0400 m31201| 2015-07-09T14:14:31.642-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.645-0400 m31202| 2015-07-09T14:14:31.644-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.648-0400 m31102| 2015-07-09T14:14:31.648-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.649-0400 m31101| 2015-07-09T14:14:31.648-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.658-0400 m31200| 2015-07-09T14:14:31.656-0400 I INDEX [conn35] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.658-0400 m31200| 2015-07-09T14:14:31.656-0400 I INDEX [conn35] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.659-0400 m31201| 2015-07-09T14:14:31.658-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.659-0400 m31201| 2015-07-09T14:14:31.658-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.660-0400 m31100| 2015-07-09T14:14:31.659-0400 I INDEX [conn151] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.660-0400 m31100| 2015-07-09T14:14:31.659-0400 I INDEX [conn151] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.661-0400 m31102| 2015-07-09T14:14:31.660-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.661-0400 m31102| 2015-07-09T14:14:31.660-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.661-0400 m31101| 2015-07-09T14:14:31.661-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.662-0400 m31202| 2015-07-09T14:14:31.661-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.662-0400 m31101| 2015-07-09T14:14:31.661-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.662-0400 m31202| 2015-07-09T14:14:31.661-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.667-0400 m31200| 2015-07-09T14:14:31.666-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.673-0400 m31201| 2015-07-09T14:14:31.672-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.676-0400 m31100| 2015-07-09T14:14:31.675-0400 I INDEX [conn151] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.676-0400 m31101| 2015-07-09T14:14:31.675-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.678-0400 m31102| 2015-07-09T14:14:31.678-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.680-0400 m31202| 2015-07-09T14:14:31.680-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.684-0400 m31201| 2015-07-09T14:14:31.682-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.684-0400 m31201| 2015-07-09T14:14:31.682-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.687-0400 m31200| 2015-07-09T14:14:31.686-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.687-0400 m31200| 2015-07-09T14:14:31.686-0400 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.688-0400 m31100| 2015-07-09T14:14:31.687-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.688-0400 m31100| 2015-07-09T14:14:31.687-0400 I INDEX [conn50] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.690-0400 m31101| 2015-07-09T14:14:31.690-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.691-0400 m31101| 2015-07-09T14:14:31.690-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.695-0400 m31200| 2015-07-09T14:14:31.693-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.696-0400 m31202| 2015-07-09T14:14:31.695-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.696-0400 m31202| 2015-07-09T14:14:31.695-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.698-0400 m31102| 2015-07-09T14:14:31.698-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.698-0400 m31102| 2015-07-09T14:14:31.698-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.705-0400 m31100| 2015-07-09T14:14:31.704-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.705-0400 m31101| 2015-07-09T14:14:31.704-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.710-0400 m31201| 2015-07-09T14:14:31.709-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.710-0400 m31200| 2015-07-09T14:14:31.709-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.710-0400 m31200| 2015-07-09T14:14:31.709-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.711-0400 m31102| 2015-07-09T14:14:31.710-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.720-0400 m31100| 2015-07-09T14:14:31.719-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.720-0400 m31100| 2015-07-09T14:14:31.719-0400 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.720-0400 m31202| 2015-07-09T14:14:31.719-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.720-0400 m31101| 2015-07-09T14:14:31.719-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.721-0400 m31101| 2015-07-09T14:14:31.719-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.725-0400 m31200| 2015-07-09T14:14:31.724-0400 I INDEX [conn52] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.726-0400 m31200| 2015-07-09T14:14:31.725-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 78936 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.729-0400 m31102| 2015-07-09T14:14:31.728-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.729-0400 m31102| 2015-07-09T14:14:31.728-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.734-0400 m31201| 2015-07-09T14:14:31.733-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.734-0400 m31201| 2015-07-09T14:14:31.733-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.745-0400 m31100| 2015-07-09T14:14:31.740-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.748-0400 m31100| 2015-07-09T14:14:31.741-0400 I COMMAND [conn58] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 88998 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.748-0400 m31200| 2015-07-09T14:14:31.742-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.748-0400 m31200| 2015-07-09T14:14:31.743-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.749-0400 m31100| 2015-07-09T14:14:31.743-0400 I COMMAND [conn56] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 126592 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.750-0400 m31101| 2015-07-09T14:14:31.743-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.750-0400 m30999| 2015-07-09T14:14:31.743-0400 I SHARDING [conn306] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.750-0400 m30999| 2015-07-09T14:14:31.743-0400 I SHARDING [conn306] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.753-0400 m31202| 2015-07-09T14:14:31.744-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.754-0400 m31202| 2015-07-09T14:14:31.744-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.754-0400 m31100| 2015-07-09T14:14:31.744-0400 I NETWORK [conn151] end connection 127.0.0.1:63445 (102 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.755-0400 m31102| 2015-07-09T14:14:31.748-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.755-0400 m31201| 2015-07-09T14:14:31.748-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.756-0400 m31202| 2015-07-09T14:14:31.756-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.757-0400 m31100| 2015-07-09T14:14:31.756-0400 I INDEX [conn71] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.757-0400 m31100| 2015-07-09T14:14:31.756-0400 I INDEX [conn71] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.766-0400 m31200| 2015-07-09T14:14:31.765-0400 I INDEX [conn60] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.769-0400 m31200| 2015-07-09T14:14:31.765-0400 I COMMAND [conn60] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 106137 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.769-0400 m31101| 2015-07-09T14:14:31.767-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.770-0400 m31101| 2015-07-09T14:14:31.767-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.770-0400 m31202| 2015-07-09T14:14:31.767-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.771-0400 m31202| 2015-07-09T14:14:31.767-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.772-0400 m31201| 2015-07-09T14:14:31.767-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.772-0400 m31201| 2015-07-09T14:14:31.767-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.772-0400 m31102| 2015-07-09T14:14:31.769-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.773-0400 m31102| 2015-07-09T14:14:31.769-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.774-0400 m31100| 2015-07-09T14:14:31.773-0400 I INDEX [conn71] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.776-0400 m31100| 2015-07-09T14:14:31.774-0400 I COMMAND [conn71] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 124178 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 154ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.776-0400 m31200| 2015-07-09T14:14:31.774-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.777-0400 m31200| 2015-07-09T14:14:31.774-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.781-0400 m31201| 2015-07-09T14:14:31.780-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.783-0400 m31101| 2015-07-09T14:14:31.780-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.785-0400 m31102| 2015-07-09T14:14:31.784-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.794-0400 m31202| 2015-07-09T14:14:31.794-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.797-0400 m31200| 2015-07-09T14:14:31.797-0400 I INDEX [conn81] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.800-0400 m31200| 2015-07-09T14:14:31.797-0400 I COMMAND [conn81] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 140938 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.801-0400 m31100| 2015-07-09T14:14:31.800-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.801-0400 m31100| 2015-07-09T14:14:31.801-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.807-0400 m31101| 2015-07-09T14:14:31.806-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.807-0400 m31101| 2015-07-09T14:14:31.806-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.808-0400 m31201| 2015-07-09T14:14:31.806-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.809-0400 m31201| 2015-07-09T14:14:31.807-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.809-0400 m31102| 2015-07-09T14:14:31.807-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.809-0400 m31102| 2015-07-09T14:14:31.807-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.815-0400 m31200| 2015-07-09T14:14:31.814-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.815-0400 m31200| 2015-07-09T14:14:31.814-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.822-0400 m31100| 2015-07-09T14:14:31.821-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.823-0400 m31101| 2015-07-09T14:14:31.821-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.823-0400 m31201| 2015-07-09T14:14:31.822-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.824-0400 m31100| 2015-07-09T14:14:31.822-0400 I COMMAND [conn45] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149543 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.825-0400 m31202| 2015-07-09T14:14:31.822-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.826-0400 m31202| 2015-07-09T14:14:31.822-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.826-0400 m31102| 2015-07-09T14:14:31.825-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.831-0400 m31200| 2015-07-09T14:14:31.831-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.833-0400 m31200| 2015-07-09T14:14:31.832-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 168270 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 202ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.838-0400 m31101| 2015-07-09T14:14:31.837-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.838-0400 m31101| 2015-07-09T14:14:31.837-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.839-0400 m31100| 2015-07-09T14:14:31.838-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.839-0400 m31100| 2015-07-09T14:14:31.838-0400 I INDEX [conn73] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.840-0400 m31201| 2015-07-09T14:14:31.839-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.841-0400 m31201| 2015-07-09T14:14:31.839-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.841-0400 m31202| 2015-07-09T14:14:31.839-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.847-0400 m31102| 2015-07-09T14:14:31.845-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.847-0400 m31102| 2015-07-09T14:14:31.845-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.847-0400 m31201| 2015-07-09T14:14:31.846-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.853-0400 m31101| 2015-07-09T14:14:31.852-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.857-0400 m31100| 2015-07-09T14:14:31.857-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.858-0400 m31200| 2015-07-09T14:14:31.858-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.859-0400 m31200| 2015-07-09T14:14:31.858-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.859-0400 m31100| 2015-07-09T14:14:31.858-0400 I COMMAND [conn73] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 193058 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 228ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.863-0400 m31202| 2015-07-09T14:14:31.863-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.863-0400 m31202| 2015-07-09T14:14:31.863-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.866-0400 m31102| 2015-07-09T14:14:31.865-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.869-0400 m31100| 2015-07-09T14:14:31.868-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.869-0400 m31200| 2015-07-09T14:14:31.868-0400 I INDEX [conn137] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.870-0400 m31100| 2015-07-09T14:14:31.868-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.871-0400 m31201| 2015-07-09T14:14:31.868-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.873-0400 m31200| 2015-07-09T14:14:31.869-0400 I COMMAND [conn137] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 198801 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 235ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.874-0400 m31201| 2015-07-09T14:14:31.868-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.874-0400 m31202| 2015-07-09T14:14:31.874-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.878-0400 m31101| 2015-07-09T14:14:31.874-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.880-0400 m31101| 2015-07-09T14:14:31.874-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.881-0400 m31102| 2015-07-09T14:14:31.876-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.881-0400 m31102| 2015-07-09T14:14:31.876-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.882-0400 m31200| 2015-07-09T14:14:31.876-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.882-0400 m31200| 2015-07-09T14:14:31.876-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.882-0400 m31201| 2015-07-09T14:14:31.878-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.887-0400 m31100| 2015-07-09T14:14:31.886-0400 I INDEX [conn57] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.887-0400 m31202| 2015-07-09T14:14:31.887-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.887-0400 m31202| 2015-07-09T14:14:31.887-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.890-0400 m31100| 2015-07-09T14:14:31.888-0400 I COMMAND [conn57] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 225096 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 255ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.894-0400 m31200| 2015-07-09T14:14:31.893-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.895-0400 m31101| 2015-07-09T14:14:31.893-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.895-0400 m31200| 2015-07-09T14:14:31.894-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.896-0400 m31200| 2015-07-09T14:14:31.894-0400 I COMMAND [conn30] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 235349 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 260ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.897-0400 m31200| 2015-07-09T14:14:31.895-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.899-0400 m31200| 2015-07-09T14:14:31.895-0400 I COMMAND [conn85] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo3: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149782 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.899-0400 m31102| 2015-07-09T14:14:31.896-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.900-0400 m31200| 2015-07-09T14:14:31.898-0400 I COMMAND [conn34] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo1: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149798 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.900-0400 m31201| 2015-07-09T14:14:31.898-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.901-0400 m31201| 2015-07-09T14:14:31.898-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.901-0400 m31200| 2015-07-09T14:14:31.898-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.903-0400 m31100| 2015-07-09T14:14:31.901-0400 I INDEX [conn59] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.903-0400 m31100| 2015-07-09T14:14:31.901-0400 I INDEX [conn59] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.904-0400 m31200| 2015-07-09T14:14:31.901-0400 I COMMAND [conn48] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151737 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 154ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.904-0400 m31200| 2015-07-09T14:14:31.901-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.905-0400 m31202| 2015-07-09T14:14:31.905-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.907-0400 m31102| 2015-07-09T14:14:31.905-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.907-0400 m31102| 2015-07-09T14:14:31.905-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.911-0400 m31101| 2015-07-09T14:14:31.911-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.912-0400 m31101| 2015-07-09T14:14:31.911-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.915-0400 m31201| 2015-07-09T14:14:31.914-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.919-0400 m31100| 2015-07-09T14:14:31.919-0400 I INDEX [conn59] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.920-0400 m31100| 2015-07-09T14:14:31.919-0400 I COMMAND [conn59] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 254836 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 286ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.921-0400 m31100| 2015-07-09T14:14:31.919-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.924-0400 m31100| 2015-07-09T14:14:31.923-0400 I COMMAND [conn39] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo3: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 175044 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.924-0400 m31100| 2015-07-09T14:14:31.923-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.926-0400 m31202| 2015-07-09T14:14:31.924-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.927-0400 m31202| 2015-07-09T14:14:31.924-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.929-0400 m31201| 2015-07-09T14:14:31.924-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.929-0400 m31201| 2015-07-09T14:14:31.924-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.930-0400 m31101| 2015-07-09T14:14:31.925-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.930-0400 m31102| 2015-07-09T14:14:31.925-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.932-0400 m31100| 2015-07-09T14:14:31.925-0400 I COMMAND [conn35] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo1: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 177084 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.936-0400 m31100| 2015-07-09T14:14:31.925-0400 I COMMAND [conn58] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 179616 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.937-0400 m31100| 2015-07-09T14:14:31.926-0400 I COMMAND [conn71] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version epoch mismatch detected for db49.coll49, the collection may have been dropped and recreated ( ns : db49.coll49, received : 0|0||000000000000000000000000, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:391 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 150610 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 150ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.937-0400 m31100| 2015-07-09T14:14:31.926-0400 I COMMAND [conn56] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 179136 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.938-0400 m30999| 2015-07-09T14:14:31.926-0400 I SHARDING [conn303] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.938-0400 m30999| 2015-07-09T14:14:31.926-0400 I SHARDING [conn303] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.939-0400 m31100| 2015-07-09T14:14:31.926-0400 I NETWORK [conn59] end connection 127.0.0.1:62752 (101 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.939-0400 m30998| 2015-07-09T14:14:31.926-0400 I SHARDING [conn303] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.939-0400 m30998| 2015-07-09T14:14:31.926-0400 I SHARDING [conn303] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.940-0400 m31100| 2015-07-09T14:14:31.927-0400 I NETWORK [conn71] end connection 127.0.0.1:62807 (100 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.940-0400 m31100| 2015-07-09T14:14:31.927-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.940-0400 m31200| 2015-07-09T14:14:31.929-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.940-0400 m31100| 2015-07-09T14:14:31.932-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.942-0400 m31202| 2015-07-09T14:14:31.932-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.943-0400 m31100| 2015-07-09T14:14:31.932-0400 I COMMAND [conn15] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 180607 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 185ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.945-0400 m31201| 2015-07-09T14:14:31.933-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.945-0400 m31200| 2015-07-09T14:14:31.935-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.946-0400 m31100| 2015-07-09T14:14:31.935-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.946-0400 m31201| 2015-07-09T14:14:31.936-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.946-0400 m31102| 2015-07-09T14:14:31.937-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.946-0400 m31102| 2015-07-09T14:14:31.937-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.947-0400 m31100| 2015-07-09T14:14:31.938-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.947-0400 m31200| 2015-07-09T14:14:31.943-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.947-0400 m31101| 2015-07-09T14:14:31.943-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.947-0400 m31101| 2015-07-09T14:14:31.943-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.948-0400 m31202| 2015-07-09T14:14:31.943-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.948-0400 m31202| 2015-07-09T14:14:31.943-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.948-0400 m31201| 2015-07-09T14:14:31.943-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes 
db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.948-0400 m31100| 2015-07-09T14:14:31.947-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.948-0400 m31100| 2015-07-09T14:14:31.948-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.949-0400 m31201| 2015-07-09T14:14:31.948-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.950-0400 m31100| 2015-07-09T14:14:31.949-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.950-0400 m31101| 2015-07-09T14:14:31.949-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.951-0400 m31102| 2015-07-09T14:14:31.951-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.951-0400 m31101| 2015-07-09T14:14:31.951-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.952-0400 m31202| 2015-07-09T14:14:31.951-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.952-0400 m31100| 2015-07-09T14:14:31.951-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.952-0400 m31200| 2015-07-09T14:14:31.952-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.954-0400 m31201| 2015-07-09T14:14:31.953-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.954-0400 m31202| 2015-07-09T14:14:31.953-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.954-0400 m31102| 2015-07-09T14:14:31.954-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.954-0400 m31101| 2015-07-09T14:14:31.954-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.955-0400 m31201| 2015-07-09T14:14:31.954-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.955-0400 m31200| 2015-07-09T14:14:31.955-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.956-0400 m31102| 2015-07-09T14:14:31.955-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.956-0400 m31101| 2015-07-09T14:14:31.956-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.956-0400 m31201| 2015-07-09T14:14:31.956-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.957-0400 m31200| 2015-07-09T14:14:31.956-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.957-0400 m31202| 2015-07-09T14:14:31.957-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.957-0400 m31201| 
2015-07-09T14:14:31.957-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.958-0400 m31202| 2015-07-09T14:14:31.958-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.958-0400 m31101| 2015-07-09T14:14:31.958-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.958-0400 m31201| 2015-07-09T14:14:31.958-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.959-0400 m31102| 2015-07-09T14:14:31.959-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.959-0400 m31101| 2015-07-09T14:14:31.959-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.959-0400 m31202| 2015-07-09T14:14:31.959-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.961-0400 m31201| 2015-07-09T14:14:31.960-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.961-0400 m31101| 2015-07-09T14:14:31.960-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.961-0400 m31102| 2015-07-09T14:14:31.960-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.961-0400 m31202| 2015-07-09T14:14:31.961-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.962-0400 m31202| 2015-07-09T14:14:31.962-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.962-0400 m31102| 2015-07-09T14:14:31.962-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.962-0400 m31201| 2015-07-09T14:14:31.962-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.963-0400 m31101| 2015-07-09T14:14:31.962-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.963-0400 m31102| 2015-07-09T14:14:31.963-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.963-0400 m31202| 2015-07-09T14:14:31.963-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.963-0400 m31101| 2015-07-09T14:14:31.963-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.964-0400 m31101| 2015-07-09T14:14:31.964-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.964-0400 m31202| 2015-07-09T14:14:31.964-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.965-0400 m31102| 2015-07-09T14:14:31.964-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.965-0400 m31102| 2015-07-09T14:14:31.965-0400 I COMMAND [repl writer 
worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.965-0400 m31202| 2015-07-09T14:14:31.965-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.965-0400 m31101| 2015-07-09T14:14:31.965-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.966-0400 m31202| 2015-07-09T14:14:31.966-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.966-0400 m31102| 2015-07-09T14:14:31.966-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:31.967-0400 m31102| 2015-07-09T14:14:31.967-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.032-0400 m31200| 2015-07-09T14:14:32.031-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.032-0400 m31200| 2015-07-09T14:14:32.031-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.033-0400 m31100| 2015-07-09T14:14:32.032-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.033-0400 m31100| 2015-07-09T14:14:32.032-0400 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.038-0400 m31200| 2015-07-09T14:14:32.037-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.041-0400 m31100| 2015-07-09T14:14:32.040-0400 I INDEX [conn58] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.045-0400 m31200| 2015-07-09T14:14:32.045-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.046-0400 m31200| 2015-07-09T14:14:32.045-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.047-0400 m31202| 2015-07-09T14:14:32.047-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.048-0400 m31202| 2015-07-09T14:14:32.047-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.049-0400 m31201| 2015-07-09T14:14:32.049-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.050-0400 m31201| 2015-07-09T14:14:32.049-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.050-0400 m31100| 2015-07-09T14:14:32.049-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.050-0400 m31100| 2015-07-09T14:14:32.049-0400 I INDEX [conn73] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.055-0400 m31102| 2015-07-09T14:14:32.054-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.055-0400 m31102| 2015-07-09T14:14:32.054-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.056-0400 m31101| 2015-07-09T14:14:32.054-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.056-0400 m31101| 2015-07-09T14:14:32.054-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.063-0400 m31200| 2015-07-09T14:14:32.063-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.063-0400 m31201| 2015-07-09T14:14:32.063-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.069-0400 m31202| 2015-07-09T14:14:32.068-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.070-0400 m31101| 2015-07-09T14:14:32.068-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.072-0400 m31100| 2015-07-09T14:14:32.072-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.073-0400 m31102| 2015-07-09T14:14:32.072-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.076-0400 m31201| 2015-07-09T14:14:32.074-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.076-0400 m31201| 2015-07-09T14:14:32.074-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.077-0400 m31200| 2015-07-09T14:14:32.074-0400 I INDEX [conn35] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.077-0400 m31200| 2015-07-09T14:14:32.074-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.081-0400 m31200| 2015-07-09T14:14:32.081-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.082-0400 m31202| 2015-07-09T14:14:32.081-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.082-0400 m31202| 2015-07-09T14:14:32.081-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.095-0400 m31101| 2015-07-09T14:14:32.094-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.096-0400 m31201| 2015-07-09T14:14:32.094-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.096-0400 m31101| 2015-07-09T14:14:32.094-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.097-0400 m31102| 2015-07-09T14:14:32.096-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.097-0400 m31102| 2015-07-09T14:14:32.096-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.101-0400 m31202| 2015-07-09T14:14:32.101-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.102-0400 m31100| 2015-07-09T14:14:32.101-0400 I INDEX [conn56] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.102-0400 m31100| 2015-07-09T14:14:32.101-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.108-0400 m31200| 2015-07-09T14:14:32.108-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.109-0400 m31200| 2015-07-09T14:14:32.108-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.115-0400 m31102| 2015-07-09T14:14:32.112-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.116-0400 m31100| 2015-07-09T14:14:32.112-0400 I INDEX [conn56] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.116-0400 m31201| 2015-07-09T14:14:32.113-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.116-0400 m31201| 2015-07-09T14:14:32.113-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.116-0400 m31202| 2015-07-09T14:14:32.114-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.116-0400 m31202| 2015-07-09T14:14:32.114-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.117-0400 m31101| 2015-07-09T14:14:32.116-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.119-0400 m31200| 2015-07-09T14:14:32.118-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.122-0400 m31100| 2015-07-09T14:14:32.122-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.122-0400 m31100| 2015-07-09T14:14:32.122-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.127-0400 m31201| 2015-07-09T14:14:32.126-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.127-0400 m31202| 2015-07-09T14:14:32.126-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.127-0400 m31102| 2015-07-09T14:14:32.126-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.128-0400 m31102| 2015-07-09T14:14:32.126-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.131-0400 m31200| 2015-07-09T14:14:32.131-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.131-0400 m31200| 2015-07-09T14:14:32.131-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.133-0400 m31100| 2015-07-09T14:14:32.132-0400 I INDEX [conn45] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.134-0400 m31101| 2015-07-09T14:14:32.134-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.134-0400 m31101| 2015-07-09T14:14:32.134-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.138-0400 m31102| 2015-07-09T14:14:32.138-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.141-0400 m31202| 2015-07-09T14:14:32.141-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.142-0400 m31202| 2015-07-09T14:14:32.141-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.142-0400 m31201| 2015-07-09T14:14:32.141-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.142-0400 m31201| 2015-07-09T14:14:32.141-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.143-0400 m31200| 2015-07-09T14:14:32.141-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.143-0400 m31100| 2015-07-09T14:14:32.142-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.143-0400 m31100| 2015-07-09T14:14:32.142-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.147-0400 m31101| 2015-07-09T14:14:32.146-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.149-0400 m31102| 2015-07-09T14:14:32.148-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.149-0400 m31102| 2015-07-09T14:14:32.148-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.150-0400 m31202| 2015-07-09T14:14:32.150-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.150-0400 m31201| 2015-07-09T14:14:32.150-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.156-0400 m31200| 2015-07-09T14:14:32.152-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.156-0400 m31100| 2015-07-09T14:14:32.152-0400 I INDEX [conn57] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.157-0400 m31200| 2015-07-09T14:14:32.152-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.158-0400 m31100| 2015-07-09T14:14:32.153-0400 I COMMAND [conn57] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 91513 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 111ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.159-0400 m31100| 2015-07-09T14:14:32.153-0400 I COMMAND [conn58] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 110558 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 110ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.159-0400 m31101| 2015-07-09T14:14:32.153-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.159-0400 m31101| 2015-07-09T14:14:32.153-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.159-0400 m31102| 2015-07-09T14:14:32.158-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.168-0400 m31101| 2015-07-09T14:14:32.167-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.168-0400 m31201| 2015-07-09T14:14:32.167-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.169-0400 m31201| 2015-07-09T14:14:32.167-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.169-0400 m31202| 2015-07-09T14:14:32.167-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.169-0400 m31202| 2015-07-09T14:14:32.167-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.169-0400 m31200| 2015-07-09T14:14:32.168-0400 I INDEX [conn38] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.169-0400 m31100| 2015-07-09T14:14:32.167-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.170-0400 m31100| 2015-07-09T14:14:32.167-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.170-0400 m31200| 2015-07-09T14:14:32.169-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 94160 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.172-0400 m31102| 2015-07-09T14:14:32.171-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.173-0400 m31102| 2015-07-09T14:14:32.171-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.177-0400 m31202| 2015-07-09T14:14:32.177-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.180-0400 m31100| 2015-07-09T14:14:32.178-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.182-0400 m31100| 2015-07-09T14:14:32.178-0400 I COMMAND [conn49] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 106402 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.182-0400 m31200| 2015-07-09T14:14:32.181-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.183-0400 m31200| 2015-07-09T14:14:32.182-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.183-0400 m31101| 2015-07-09T14:14:32.181-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.184-0400 m31101| 2015-07-09T14:14:32.182-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.184-0400 m31201| 2015-07-09T14:14:32.182-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.185-0400 m31102| 2015-07-09T14:14:32.184-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.187-0400 m31202| 2015-07-09T14:14:32.186-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.188-0400 m31202| 2015-07-09T14:14:32.186-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.188-0400 m31101| 2015-07-09T14:14:32.188-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.192-0400 m31201| 2015-07-09T14:14:32.191-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.192-0400 m31201| 2015-07-09T14:14:32.191-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.193-0400 m31200| 2015-07-09T14:14:32.192-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.193-0400 m31100| 2015-07-09T14:14:32.191-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.194-0400 m31100| 2015-07-09T14:14:32.191-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.195-0400 m31200| 2015-07-09T14:14:32.193-0400 I COMMAND [conn81] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 114978 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.198-0400 m31202| 2015-07-09T14:14:32.197-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.204-0400 m31101| 2015-07-09T14:14:32.203-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.204-0400 m31101| 2015-07-09T14:14:32.203-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.206-0400 m31201| 2015-07-09T14:14:32.205-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.207-0400 m31100| 2015-07-09T14:14:32.205-0400 I INDEX [conn47] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.207-0400 m31200| 2015-07-09T14:14:32.205-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.207-0400 m31200| 2015-07-09T14:14:32.205-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.209-0400 m31100| 2015-07-09T14:14:32.209-0400 I COMMAND [conn47] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 124907 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.210-0400 m31102| 2015-07-09T14:14:32.210-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.211-0400 m31102| 2015-07-09T14:14:32.210-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.215-0400 m31101| 2015-07-09T14:14:32.215-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.217-0400 m31202| 2015-07-09T14:14:32.217-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.218-0400 m31202| 2015-07-09T14:14:32.217-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.222-0400 m31100| 2015-07-09T14:14:32.221-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.223-0400 m31201| 2015-07-09T14:14:32.221-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.223-0400 m31100| 2015-07-09T14:14:32.221-0400 I INDEX [conn50] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.224-0400 m31201| 2015-07-09T14:14:32.221-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.231-0400 m31200| 2015-07-09T14:14:32.231-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.232-0400 m31102| 2015-07-09T14:14:32.231-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.233-0400 m31202| 2015-07-09T14:14:32.231-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.235-0400 m31101| 2015-07-09T14:14:32.234-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.235-0400 m31101| 2015-07-09T14:14:32.234-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.239-0400 m31200| 2015-07-09T14:14:32.235-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 135435 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.244-0400 m31201| 2015-07-09T14:14:32.242-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.244-0400 m31100| 2015-07-09T14:14:32.242-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.244-0400 m31101| 2015-07-09T14:14:32.242-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.245-0400 m31100| 2015-07-09T14:14:32.242-0400 I COMMAND [conn50] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151198 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 184ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.249-0400 m31102| 2015-07-09T14:14:32.249-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.250-0400 m31102| 2015-07-09T14:14:32.249-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.257-0400 m31200| 2015-07-09T14:14:32.256-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.258-0400 m31101| 2015-07-09T14:14:32.256-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.258-0400 m31201| 2015-07-09T14:14:32.256-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.259-0400 m31200| 2015-07-09T14:14:32.256-0400 I INDEX [conn28] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.260-0400 m31101| 
2015-07-09T14:14:32.256-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.260-0400 m31201| 2015-07-09T14:14:32.256-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.260-0400 m31100| 2015-07-09T14:14:32.257-0400 I INDEX [conn33] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.261-0400 m31100| 2015-07-09T14:14:32.257-0400 I INDEX [conn33] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.261-0400 m31201| 2015-07-09T14:14:32.261-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.262-0400 m31202| 2015-07-09T14:14:32.261-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.262-0400 m31202| 2015-07-09T14:14:32.261-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.263-0400 m31200| 2015-07-09T14:14:32.263-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.263-0400 m31102| 2015-07-09T14:14:32.263-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.265-0400 m31200| 2015-07-09T14:14:32.264-0400 I COMMAND [conn28] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 177878 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 206ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.265-0400 m31101| 2015-07-09T14:14:32.265-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.269-0400 m31100| 2015-07-09T14:14:32.266-0400 I INDEX [conn33] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.270-0400 m31100| 2015-07-09T14:14:32.267-0400 I COMMAND [conn33] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 184706 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 209ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.272-0400 m31202| 2015-07-09T14:14:32.271-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.273-0400 m31200| 2015-07-09T14:14:32.272-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.273-0400 m31200| 2015-07-09T14:14:32.272-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.279-0400 m31201| 2015-07-09T14:14:32.277-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.279-0400 m31201| 2015-07-09T14:14:32.277-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.282-0400 m31100| 2015-07-09T14:14:32.282-0400 I INDEX [conn55] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.283-0400 m31202| 2015-07-09T14:14:32.282-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.284-0400 m31100| 2015-07-09T14:14:32.282-0400 I INDEX [conn55] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.284-0400 m31202| 2015-07-09T14:14:32.282-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.284-0400 m31200| 2015-07-09T14:14:32.282-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.284-0400 m31200| 2015-07-09T14:14:32.283-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.285-0400 m31200| 2015-07-09T14:14:32.283-0400 I COMMAND [conn19] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 204771 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 223ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.288-0400 m31201| 2015-07-09T14:14:32.285-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.289-0400 m31102| 2015-07-09T14:14:32.286-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.289-0400 m31102| 2015-07-09T14:14:32.286-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.290-0400 m31200| 2015-07-09T14:14:32.288-0400 I COMMAND [conn63] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo4: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 126931 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.290-0400 m31200| 2015-07-09T14:14:32.288-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.291-0400 m31200| 2015-07-09T14:14:32.290-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.292-0400 m31200| 2015-07-09T14:14:32.290-0400 I COMMAND [conn64] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 131014 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.293-0400 m31200| 2015-07-09T14:14:32.292-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.293-0400 m31101| 2015-07-09T14:14:32.293-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.294-0400 m31101| 2015-07-09T14:14:32.293-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.295-0400 m31200| 2015-07-09T14:14:32.292-0400 I COMMAND [conn84] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo0: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 133022 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.298-0400 m31200| 2015-07-09T14:14:32.296-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.299-0400 m31200| 2015-07-09T14:14:32.296-0400 I COMMAND [conn62] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo1: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 134641 } }, Metadata: { acquireCount: { w: 1 } }, 
oplog: { acquireCount: { w: 1 } } } protocol:op_query 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.302-0400 m31100| 2015-07-09T14:14:32.296-0400 I INDEX [conn55] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.302-0400 m31100| 2015-07-09T14:14:32.297-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.303-0400 m31100| 2015-07-09T14:14:32.297-0400 I COMMAND [conn55] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 208082 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 237ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.304-0400 m31102| 2015-07-09T14:14:32.300-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.304-0400 m31201| 2015-07-09T14:14:32.300-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.304-0400 m31201| 2015-07-09T14:14:32.300-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.305-0400 m31200| 2015-07-09T14:14:32.300-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.306-0400 m31100| 2015-07-09T14:14:32.302-0400 I COMMAND [conn34] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo4: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 140827 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.307-0400 m31100| 2015-07-09T14:14:32.302-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.309-0400 m31100| 2015-07-09T14:14:32.307-0400 I COMMAND [conn37] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 145298 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 150ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.309-0400 m31100| 2015-07-09T14:14:32.307-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.309-0400 m31202| 2015-07-09T14:14:32.307-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.309-0400 m31101| 2015-07-09T14:14:32.307-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.310-0400 m31201| 2015-07-09T14:14:32.308-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.311-0400 m31201| 2015-07-09T14:14:32.311-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.313-0400 m31100| 2015-07-09T14:14:32.312-0400 I COMMAND [conn38] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo0: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149825 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.314-0400 m30998| 2015-07-09T14:14:32.312-0400 I SHARDING [conn303] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.315-0400 m31102| 2015-07-09T14:14:32.312-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.315-0400 m31102| 2015-07-09T14:14:32.312-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.316-0400 m30998| 2015-07-09T14:14:32.313-0400 I SHARDING [conn303] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.317-0400 m31100| 2015-07-09T14:14:32.312-0400 I COMMAND [conn49] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 130542 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.317-0400 m31100| 2015-07-09T14:14:32.314-0400 I NETWORK [conn33] end connection 127.0.0.1:62635 (99 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.318-0400 m31201| 2015-07-09T14:14:32.314-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.318-0400 m30999| 2015-07-09T14:14:32.315-0400 I SHARDING [conn303] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.318-0400 m30999| 2015-07-09T14:14:32.315-0400 I SHARDING [conn303] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.318-0400 m31100| 2015-07-09T14:14:32.315-0400 I NETWORK [conn55] end connection 127.0.0.1:62745 (98 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.318-0400 m31100| 2015-07-09T14:14:32.315-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.318-0400 m31100| 2015-07-09T14:14:32.315-0400 I COMMAND [conn73] command db49.coll49 command: listIndexes { listIndexes: 
"coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 154578 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.319-0400 m31200| 2015-07-09T14:14:32.315-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.321-0400 m31201| 2015-07-09T14:14:32.320-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.322-0400 m31100| 2015-07-09T14:14:32.321-0400 I COMMAND [conn35] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo1: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 157560 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 163ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.323-0400 m31100| 2015-07-09T14:14:32.321-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.323-0400 m31102| 2015-07-09T14:14:32.321-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.324-0400 m31202| 2015-07-09T14:14:32.321-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.324-0400 m31202| 2015-07-09T14:14:32.321-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.325-0400 m31100| 2015-07-09T14:14:32.323-0400 I COMMAND [conn15] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo8: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 110996 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.326-0400 m31100| 2015-07-09T14:14:32.324-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.326-0400 m31200| 2015-07-09T14:14:32.324-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.327-0400 m31201| 2015-07-09T14:14:32.326-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.328-0400 m31101| 2015-07-09T14:14:32.327-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.328-0400 m31101| 2015-07-09T14:14:32.327-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.328-0400 m31100| 2015-07-09T14:14:32.327-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:32.332-0400 m31200| 2015-07-09T14:14:32.331-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.332-0400 m31100| 2015-07-09T14:14:32.332-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.333-0400 m31202| 2015-07-09T14:14:32.332-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.333-0400 m31201| 2015-07-09T14:14:32.332-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.333-0400 m31100| 2015-07-09T14:14:32.333-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.334-0400 m31102| 2015-07-09T14:14:32.334-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.335-0400 m31102| 2015-07-09T14:14:32.334-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.336-0400 m31200| 2015-07-09T14:14:32.336-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.337-0400 m31202| 2015-07-09T14:14:32.336-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.337-0400 m31201| 2015-07-09T14:14:32.337-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.338-0400 m31100| 2015-07-09T14:14:32.337-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.340-0400 m31202| 2015-07-09T14:14:32.340-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.341-0400 m31101| 2015-07-09T14:14:32.341-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.342-0400 m31201| 2015-07-09T14:14:32.341-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.342-0400 m31202| 2015-07-09T14:14:32.342-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.344-0400 m31201| 2015-07-09T14:14:32.344-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.345-0400 m31102| 2015-07-09T14:14:32.345-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.346-0400 m31102| 2015-07-09T14:14:32.346-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.346-0400 m31201| 2015-07-09T14:14:32.346-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.346-0400 m31101| 2015-07-09T14:14:32.346-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.347-0400 m31202| 2015-07-09T14:14:32.346-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.348-0400 m31101| 2015-07-09T14:14:32.347-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.348-0400 m31201| 2015-07-09T14:14:32.347-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.348-0400 m31102| 2015-07-09T14:14:32.347-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.349-0400 m31202| 2015-07-09T14:14:32.348-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.349-0400 m31101| 2015-07-09T14:14:32.348-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.349-0400 m31102| 2015-07-09T14:14:32.349-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.349-0400 m31202| 2015-07-09T14:14:32.349-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.350-0400 m31101| 2015-07-09T14:14:32.349-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.350-0400 m31102| 2015-07-09T14:14:32.350-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.350-0400 m31202| 2015-07-09T14:14:32.350-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.351-0400 m31101| 2015-07-09T14:14:32.350-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.351-0400 m31202| 2015-07-09T14:14:32.351-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.352-0400 m31102| 2015-07-09T14:14:32.351-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.352-0400 m31101| 2015-07-09T14:14:32.351-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.352-0400 m31202| 2015-07-09T14:14:32.352-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.353-0400 m31102| 2015-07-09T14:14:32.352-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.353-0400 m31101| 2015-07-09T14:14:32.353-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:32.354-0400 m31101| 2015-07-09T14:14:32.353-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.354-0400 m31202| 2015-07-09T14:14:32.354-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.354-0400 m31102| 2015-07-09T14:14:32.354-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.355-0400 m31101| 2015-07-09T14:14:32.355-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.355-0400 m31102| 2015-07-09T14:14:32.355-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.356-0400 m31102| 2015-07-09T14:14:32.356-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.356-0400 m31101| 2015-07-09T14:14:32.356-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.357-0400 m31102| 2015-07-09T14:14:32.356-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.410-0400 m31200| 2015-07-09T14:14:32.410-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.410-0400 m31200| 2015-07-09T14:14:32.410-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.413-0400 m31100| 2015-07-09T14:14:32.412-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.413-0400 m31100| 2015-07-09T14:14:32.412-0400 I INDEX [conn47] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.414-0400 m31200| 2015-07-09T14:14:32.413-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.418-0400 m31200| 2015-07-09T14:14:32.418-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.418-0400 m31200| 2015-07-09T14:14:32.418-0400 I INDEX [conn81] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.425-0400 m31100| 2015-07-09T14:14:32.424-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.427-0400 m31201| 2015-07-09T14:14:32.426-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.427-0400 m31201| 2015-07-09T14:14:32.427-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.435-0400 m31200| 2015-07-09T14:14:32.435-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.438-0400 m31202| 2015-07-09T14:14:32.437-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.439-0400 m31202| 2015-07-09T14:14:32.437-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.439-0400 m31102| 2015-07-09T14:14:32.437-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.440-0400 m31102| 2015-07-09T14:14:32.437-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.441-0400 m31100| 2015-07-09T14:14:32.440-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63685 #168 (99 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.443-0400 m31100| 2015-07-09T14:14:32.443-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.444-0400 m31100| 2015-07-09T14:14:32.443-0400 I INDEX [conn45] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.447-0400 m31200| 2015-07-09T14:14:32.447-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.448-0400 m31200| 2015-07-09T14:14:32.447-0400 I INDEX [conn137] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.448-0400 m31201| 2015-07-09T14:14:32.447-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.451-0400 m31101| 2015-07-09T14:14:32.450-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.451-0400 m31101| 2015-07-09T14:14:32.450-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.459-0400 m31102| 2015-07-09T14:14:32.459-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.461-0400 m31100| 2015-07-09T14:14:32.460-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.461-0400 m31202| 2015-07-09T14:14:32.460-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.462-0400 m31200| 2015-07-09T14:14:32.462-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.470-0400 m31101| 2015-07-09T14:14:32.470-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.471-0400 m31201| 2015-07-09T14:14:32.470-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.471-0400 m31201| 2015-07-09T14:14:32.470-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.475-0400 m31100| 2015-07-09T14:14:32.474-0400 I INDEX [conn56] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.475-0400 m31100| 2015-07-09T14:14:32.474-0400 I INDEX [conn56] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.479-0400 m31200| 2015-07-09T14:14:32.478-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.479-0400 m31200| 2015-07-09T14:14:32.478-0400 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.483-0400 m31202| 2015-07-09T14:14:32.482-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.484-0400 m31202| 2015-07-09T14:14:32.482-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.484-0400 m31102| 2015-07-09T14:14:32.482-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.484-0400 m31102| 2015-07-09T14:14:32.482-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.489-0400 m31100| 2015-07-09T14:14:32.488-0400 I INDEX [conn56] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.491-0400 m31101| 2015-07-09T14:14:32.490-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.491-0400 m31101| 2015-07-09T14:14:32.490-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.495-0400 m31201| 2015-07-09T14:14:32.495-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.497-0400 m31102| 2015-07-09T14:14:32.497-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.506-0400 m31202| 2015-07-09T14:14:32.505-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.506-0400 m31200| 2015-07-09T14:14:32.505-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.507-0400 m31100| 2015-07-09T14:14:32.505-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.507-0400 m31100| 2015-07-09T14:14:32.505-0400 I INDEX [conn50] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.509-0400 m31101| 2015-07-09T14:14:32.509-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.511-0400 m31201| 2015-07-09T14:14:32.511-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.511-0400 m31201| 2015-07-09T14:14:32.511-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.517-0400 m31102| 2015-07-09T14:14:32.515-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.517-0400 m31102| 2015-07-09T14:14:32.515-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.518-0400 m31100| 2015-07-09T14:14:32.516-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.518-0400 m31100| 2015-07-09T14:14:32.516-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.518-0400 m31101| 2015-07-09T14:14:32.516-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.519-0400 m31101| 2015-07-09T14:14:32.516-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.519-0400 m31202| 2015-07-09T14:14:32.516-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.519-0400 m31202| 2015-07-09T14:14:32.516-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.520-0400 m31200| 2015-07-09T14:14:32.517-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.521-0400 m31200| 2015-07-09T14:14:32.517-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.521-0400 m31201| 2015-07-09T14:14:32.521-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.526-0400 m31102| 2015-07-09T14:14:32.525-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.527-0400 m31200| 2015-07-09T14:14:32.527-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.528-0400 m31200| 2015-07-09T14:14:32.528-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.529-0400 m31200| 2015-07-09T14:14:32.528-0400 I COMMAND [conn19] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 80678 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.532-0400 m31202| 2015-07-09T14:14:32.532-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.532-0400 m31101| 2015-07-09T14:14:32.532-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.533-0400 m31200| 2015-07-09T14:14:32.532-0400 I COMMAND [conn48] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo4: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 101798 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.540-0400 m31102| 2015-07-09T14:14:32.539-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.541-0400 m31100| 2015-07-09T14:14:32.539-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.541-0400 m31102| 2015-07-09T14:14:32.539-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.541-0400 m31100| 2015-07-09T14:14:32.540-0400 I INDEX [conn47] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.541-0400 m31201| 2015-07-09T14:14:32.539-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.541-0400 m31201| 2015-07-09T14:14:32.539-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.553-0400 m31200| 2015-07-09T14:14:32.553-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.554-0400 m31200| 2015-07-09T14:14:32.553-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.555-0400 m31202| 2015-07-09T14:14:32.555-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.556-0400 m31202| 2015-07-09T14:14:32.555-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.556-0400 m31201| 2015-07-09T14:14:32.556-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.557-0400 m31101| 2015-07-09T14:14:32.557-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.557-0400 m31101| 2015-07-09T14:14:32.557-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.558-0400 m31100| 2015-07-09T14:14:32.555-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.558-0400 m31100| 2015-07-09T14:14:32.556-0400 I COMMAND [conn47] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 94599 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 130ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.562-0400 m31102| 2015-07-09T14:14:32.561-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.563-0400 m31102| 2015-07-09T14:14:32.563-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.569-0400 m31100| 2015-07-09T14:14:32.568-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.569-0400 m31100| 2015-07-09T14:14:32.568-0400 I INDEX [conn73] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.570-0400 m31202| 2015-07-09T14:14:32.570-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.572-0400 m31200| 2015-07-09T14:14:32.570-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.573-0400 m31200| 2015-07-09T14:14:32.571-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 103987 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 143ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.573-0400 m31101| 2015-07-09T14:14:32.573-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.575-0400 m31101| 2015-07-09T14:14:32.575-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.580-0400 m31201| 2015-07-09T14:14:32.579-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.581-0400 m31201| 2015-07-09T14:14:32.579-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.582-0400 m31200| 2015-07-09T14:14:32.579-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.582-0400 m31200| 2015-07-09T14:14:32.579-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.585-0400 m31100| 2015-07-09T14:14:32.584-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.586-0400 m31102| 2015-07-09T14:14:32.584-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.586-0400 m31102| 2015-07-09T14:14:32.584-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.587-0400 m31100| 2015-07-09T14:14:32.586-0400 I COMMAND [conn73] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 128017 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 157ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.597-0400 m31202| 2015-07-09T14:14:32.595-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.597-0400 m31200| 2015-07-09T14:14:32.595-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.598-0400 m31202| 2015-07-09T14:14:32.595-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.599-0400 m31200| 2015-07-09T14:14:32.596-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 137400 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.605-0400 m31201| 2015-07-09T14:14:32.603-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.606-0400 m31101| 2015-07-09T14:14:32.604-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.606-0400 m31101| 2015-07-09T14:14:32.604-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.606-0400 m31201| 2015-07-09T14:14:32.605-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.611-0400 m31102| 2015-07-09T14:14:32.609-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.612-0400 m31100| 2015-07-09T14:14:32.609-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.612-0400 m31200| 2015-07-09T14:14:32.609-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.612-0400 m31100| 2015-07-09T14:14:32.609-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.613-0400 m31200| 2015-07-09T14:14:32.609-0400 I INDEX [conn60] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.613-0400 m31202| 2015-07-09T14:14:32.611-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.614-0400 m31202| 2015-07-09T14:14:32.614-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.617-0400 m31201| 2015-07-09T14:14:32.617-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.618-0400 m31201| 2015-07-09T14:14:32.617-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.619-0400 m31100| 2015-07-09T14:14:32.618-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.621-0400 m31100| 2015-07-09T14:14:32.619-0400 I COMMAND [conn49] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 152085 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 185ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.621-0400 m31101| 2015-07-09T14:14:32.618-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.629-0400 m31200| 2015-07-09T14:14:32.629-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.630-0400 m31201| 2015-07-09T14:14:32.630-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.632-0400 m31200| 2015-07-09T14:14:32.630-0400 I COMMAND [conn60] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 161615 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 195ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.635-0400 m31102| 2015-07-09T14:14:32.634-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.635-0400 m31102| 2015-07-09T14:14:32.634-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.636-0400 m31202| 2015-07-09T14:14:32.636-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.636-0400 m31202| 2015-07-09T14:14:32.636-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.641-0400 m31100| 2015-07-09T14:14:32.641-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.642-0400 m31100| 2015-07-09T14:14:32.641-0400 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.645-0400 m31201| 2015-07-09T14:14:32.644-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.645-0400 m31201| 2015-07-09T14:14:32.644-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.646-0400 m31101| 2015-07-09T14:14:32.644-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.646-0400 m31101| 2015-07-09T14:14:32.644-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.648-0400 m31200| 2015-07-09T14:14:32.647-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.648-0400 m31200| 2015-07-09T14:14:32.647-0400 I INDEX [conn80] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.648-0400 m31102| 2015-07-09T14:14:32.646-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.650-0400 m31201| 2015-07-09T14:14:32.650-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.654-0400 m31101| 2015-07-09T14:14:32.653-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.659-0400 m31100| 2015-07-09T14:14:32.656-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.659-0400 m31202| 2015-07-09T14:14:32.657-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.660-0400 m31100| 2015-07-09T14:14:32.657-0400 I COMMAND [conn58] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 184669 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 222ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.660-0400 m31200| 2015-07-09T14:14:32.656-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.661-0400 m31200| 2015-07-09T14:14:32.658-0400 I COMMAND [conn80] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 188029 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 215ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.666-0400 m31102| 2015-07-09T14:14:32.665-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.667-0400 m31102| 2015-07-09T14:14:32.665-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.668-0400 m31200| 2015-07-09T14:14:32.667-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.668-0400 m31200| 2015-07-09T14:14:32.667-0400 I INDEX [conn30] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.675-0400 m31101| 2015-07-09T14:14:32.675-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.676-0400 m31101| 2015-07-09T14:14:32.675-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.677-0400 m31201| 2015-07-09T14:14:32.675-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.678-0400 m31201| 2015-07-09T14:14:32.675-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.678-0400 m31100| 2015-07-09T14:14:32.675-0400 I INDEX [conn168] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.679-0400 m31100| 2015-07-09T14:14:32.675-0400 I INDEX [conn168] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.679-0400 m31202| 2015-07-09T14:14:32.677-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.680-0400 m31202| 2015-07-09T14:14:32.677-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.687-0400 m31200| 2015-07-09T14:14:32.686-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.687-0400 m31201| 2015-07-09T14:14:32.686-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.687-0400 m31102| 2015-07-09T14:14:32.686-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.688-0400 m31200| 2015-07-09T14:14:32.686-0400 I COMMAND [conn30] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 215327 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 243ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.688-0400 m31200| 2015-07-09T14:14:32.686-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.690-0400 m31100| 2015-07-09T14:14:32.689-0400 I INDEX [conn168] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.691-0400 m31200| 2015-07-09T14:14:32.689-0400 I COMMAND [conn85] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo1: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 168945 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 171ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.691-0400 m31200| 2015-07-09T14:14:32.690-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.692-0400 m31100| 2015-07-09T14:14:32.690-0400 I COMMAND [conn168] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 216477 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 248ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.694-0400 m31101| 2015-07-09T14:14:32.693-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.698-0400 m31200| 2015-07-09T14:14:32.697-0400 I COMMAND [conn48] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo8: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 129918 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 136ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.698-0400 m31100| 2015-07-09T14:14:32.697-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.698-0400 m31100| 2015-07-09T14:14:32.697-0400 I INDEX [conn57] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.699-0400 m31202| 2015-07-09T14:14:32.699-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.705-0400 m31201| 2015-07-09T14:14:32.704-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.705-0400 m31201| 2015-07-09T14:14:32.704-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.705-0400 m31102| 2015-07-09T14:14:32.704-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.706-0400 m31102| 2015-07-09T14:14:32.704-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.706-0400 m31101| 2015-07-09T14:14:32.704-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.706-0400 m31101| 2015-07-09T14:14:32.704-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.711-0400 m31200| 2015-07-09T14:14:32.710-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.712-0400 m31200| 2015-07-09T14:14:32.710-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.714-0400 m31100| 2015-07-09T14:14:32.714-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.719-0400 m31100| 2015-07-09T14:14:32.714-0400 I COMMAND [conn57] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 247806 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 271ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.720-0400 m31100| 2015-07-09T14:14:32.715-0400 I COMMAND [conn73] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 126562 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 126ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.720-0400 m31100| 2015-07-09T14:14:32.715-0400 I COMMAND [conn56] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 224329 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 224ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.721-0400 m31100| 2015-07-09T14:14:32.715-0400 I COMMAND [conn45] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 251664 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 252ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.722-0400 m31100| 2015-07-09T14:14:32.718-0400 I COMMAND [conn132] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.722-0400 m31201| 2015-07-09T14:14:32.719-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.723-0400 m31202| 2015-07-09T14:14:32.719-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.723-0400 m31202| 2015-07-09T14:14:32.719-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.723-0400 m31102| 2015-07-09T14:14:32.721-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.725-0400 m31100| 2015-07-09T14:14:32.724-0400 I COMMAND [conn132] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo1: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 200846 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 206ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.726-0400 m31100| 2015-07-09T14:14:32.724-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.726-0400 m31101| 2015-07-09T14:14:32.725-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.727-0400 m31100| 2015-07-09T14:14:32.726-0400 I COMMAND [conn15] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo8: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 163976 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 165ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.730-0400 m31200| 2015-07-09T14:14:32.730-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.732-0400 m31200| 2015-07-09T14:14:32.732-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.736-0400 m31202| 2015-07-09T14:14:32.736-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.737-0400 m31102| 2015-07-09T14:14:32.736-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.737-0400 m31102| 2015-07-09T14:14:32.736-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.738-0400 m31200| 2015-07-09T14:14:32.737-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.743-0400 m31200| 2015-07-09T14:14:32.742-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.743-0400 m31201| 2015-07-09T14:14:32.742-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.743-0400 m31201| 2015-07-09T14:14:32.742-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.744-0400 m31101| 2015-07-09T14:14:32.743-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.744-0400 m31101| 2015-07-09T14:14:32.743-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.745-0400 m31200| 2015-07-09T14:14:32.745-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.746-0400 m31100| 2015-07-09T14:14:32.745-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.746-0400 m31100| 2015-07-09T14:14:32.745-0400 I INDEX [conn47] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.751-0400 m31102| 2015-07-09T14:14:32.749-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.752-0400 m31200| 2015-07-09T14:14:32.749-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.752-0400 m31202| 2015-07-09T14:14:32.749-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.753-0400 m31202| 2015-07-09T14:14:32.749-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.759-0400 m31101| 2015-07-09T14:14:32.759-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.760-0400 m31201| 2015-07-09T14:14:32.759-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.761-0400 m31201| 2015-07-09T14:14:32.761-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.764-0400 m31100| 2015-07-09T14:14:32.763-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.764-0400 m31201| 2015-07-09T14:14:32.763-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.766-0400 m31100| 2015-07-09T14:14:32.765-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.767-0400 m31100| 2015-07-09T14:14:32.765-0400 I COMMAND [conn47] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 91147 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 130ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.768-0400 m31202| 2015-07-09T14:14:32.766-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.772-0400 m31102| 2015-07-09T14:14:32.769-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.772-0400 m31102| 2015-07-09T14:14:32.769-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.773-0400 m31101| 2015-07-09T14:14:32.769-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.773-0400 m31101| 2015-07-09T14:14:32.769-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.774-0400 m31100| 2015-07-09T14:14:32.771-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.774-0400 m31200| 2015-07-09T14:14:32.771-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.775-0400 m31202| 2015-07-09T14:14:32.774-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.775-0400 m31202| 2015-07-09T14:14:32.774-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.776-0400 m31100| 2015-07-09T14:14:32.775-0400 I COMMAND [conn38] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.776-0400 m31200| 2015-07-09T14:14:32.773-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.776-0400 m31200| 2015-07-09T14:14:32.776-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.779-0400 m31100| 2015-07-09T14:14:32.779-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.780-0400 m31101| 2015-07-09T14:14:32.779-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.780-0400 m31201| 2015-07-09T14:14:32.779-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.780-0400 m31201| 2015-07-09T14:14:32.779-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.781-0400 m31101| 2015-07-09T14:14:32.780-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.784-0400 m31102| 2015-07-09T14:14:32.781-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.784-0400 m31100| 2015-07-09T14:14:32.781-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.784-0400 m31101| 2015-07-09T14:14:32.782-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.784-0400 m31100| 2015-07-09T14:14:32.783-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.784-0400 m31102| 2015-07-09T14:14:32.784-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.785-0400 m31100| 2015-07-09T14:14:32.785-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.788-0400 m31102| 2015-07-09T14:14:32.787-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.788-0400 m31100| 2015-07-09T14:14:32.788-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.790-0400 m31202| 2015-07-09T14:14:32.789-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.791-0400 m31201| 2015-07-09T14:14:32.790-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.791-0400 m31202| 2015-07-09T14:14:32.791-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.792-0400 m31201| 2015-07-09T14:14:32.791-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.792-0400 m31201| 2015-07-09T14:14:32.792-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.794-0400 m31202| 2015-07-09T14:14:32.794-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.796-0400 m31101| 2015-07-09T14:14:32.796-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.796-0400 m31101| 2015-07-09T14:14:32.796-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.797-0400 m31201| 2015-07-09T14:14:32.796-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.797-0400 m31102| 2015-07-09T14:14:32.797-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.797-0400 m31102| 2015-07-09T14:14:32.797-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.807-0400 m31102| 2015-07-09T14:14:32.806-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.808-0400 m31102| 2015-07-09T14:14:32.806-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.809-0400 m31201| 2015-07-09T14:14:32.807-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.809-0400 m31201| 2015-07-09T14:14:32.808-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.810-0400 m31101| 2015-07-09T14:14:32.809-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.812-0400 m31102| 2015-07-09T14:14:32.810-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.813-0400 m31101| 2015-07-09T14:14:32.810-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.813-0400 m31201| 2015-07-09T14:14:32.811-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.813-0400 m31201| 2015-07-09T14:14:32.813-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.814-0400 m31202| 2015-07-09T14:14:32.813-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.814-0400 m31202| 2015-07-09T14:14:32.813-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.814-0400 m31101| 2015-07-09T14:14:32.813-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.815-0400 m31101| 2015-07-09T14:14:32.814-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.815-0400 m31102| 2015-07-09T14:14:32.815-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.815-0400 m31201| 2015-07-09T14:14:32.815-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.816-0400 m31101| 2015-07-09T14:14:32.816-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.818-0400 m31101| 2015-07-09T14:14:32.818-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.818-0400 m31102| 2015-07-09T14:14:32.816-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.819-0400 m31102| 2015-07-09T14:14:32.818-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.819-0400 m31202| 2015-07-09T14:14:32.818-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.819-0400 m31101| 2015-07-09T14:14:32.819-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.820-0400 m31102| 2015-07-09T14:14:32.820-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.820-0400 m31202| 2015-07-09T14:14:32.820-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.820-0400 m31101| 2015-07-09T14:14:32.820-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.821-0400 m31102| 2015-07-09T14:14:32.821-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.821-0400 m31101| 2015-07-09T14:14:32.821-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.822-0400 m31102| 2015-07-09T14:14:32.821-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.822-0400 m31202| 2015-07-09T14:14:32.822-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.823-0400 m31202| 2015-07-09T14:14:32.822-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.824-0400 m31202| 2015-07-09T14:14:32.824-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.825-0400 m31202| 2015-07-09T14:14:32.825-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.826-0400 m31202| 2015-07-09T14:14:32.826-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.827-0400 m31202| 2015-07-09T14:14:32.827-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.828-0400 m31202| 2015-07-09T14:14:32.828-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.832-0400 m31200| 2015-07-09T14:14:32.831-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.832-0400 m31200| 2015-07-09T14:14:32.831-0400 I INDEX [conn80] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.839-0400 m31100| 2015-07-09T14:14:32.838-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.839-0400 m31100| 2015-07-09T14:14:32.839-0400 I INDEX [conn47] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.840-0400 m31200| 2015-07-09T14:14:32.839-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.845-0400 m31100| 2015-07-09T14:14:32.844-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.848-0400 m31200| 2015-07-09T14:14:32.847-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.848-0400 m31200| 2015-07-09T14:14:32.847-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.850-0400 m31201| 2015-07-09T14:14:32.849-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.850-0400 m31201| 2015-07-09T14:14:32.849-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.850-0400 m31202| 2015-07-09T14:14:32.849-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.850-0400 m31202| 2015-07-09T14:14:32.849-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.858-0400 m31101| 2015-07-09T14:14:32.857-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.858-0400 m31101| 2015-07-09T14:14:32.857-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.858-0400 m31102| 2015-07-09T14:14:32.857-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.859-0400 m31102| 2015-07-09T14:14:32.857-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.863-0400 m31200| 2015-07-09T14:14:32.861-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.864-0400 m31200| 2015-07-09T14:14:32.863-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.867-0400 m31202| 2015-07-09T14:14:32.866-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.869-0400 m31102| 2015-07-09T14:14:32.868-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.869-0400 m31100| 2015-07-09T14:14:32.868-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.869-0400 m31100| 2015-07-09T14:14:32.868-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.871-0400 m31201| 2015-07-09T14:14:32.870-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.871-0400 m31101| 2015-07-09T14:14:32.870-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.873-0400 m31202| 2015-07-09T14:14:32.872-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.873-0400 m31202| 2015-07-09T14:14:32.872-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.876-0400 m31100| 2015-07-09T14:14:32.876-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.877-0400 m31100| 2015-07-09T14:14:32.877-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.881-0400 m31200| 2015-07-09T14:14:32.880-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.882-0400 m31200| 2015-07-09T14:14:32.880-0400 I INDEX [conn60] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.886-0400 m31202| 2015-07-09T14:14:32.885-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.886-0400 m31201| 2015-07-09T14:14:32.885-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.887-0400 m31201| 2015-07-09T14:14:32.885-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.887-0400 m31202| 2015-07-09T14:14:32.886-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.888-0400 m31100| 2015-07-09T14:14:32.887-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.888-0400 m31100| 2015-07-09T14:14:32.887-0400 I INDEX [conn73] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.891-0400 m31200| 2015-07-09T14:14:32.889-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.891-0400 m31101| 2015-07-09T14:14:32.889-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.892-0400 m31101| 2015-07-09T14:14:32.889-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.895-0400 m31102| 2015-07-09T14:14:32.894-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.895-0400 m31102| 2015-07-09T14:14:32.894-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.901-0400 m31201| 2015-07-09T14:14:32.901-0400 I INDEX [repl writer worker 14] build index done.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.902-0400 m31201| 2015-07-09T14:14:32.901-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.908-0400 m31200| 2015-07-09T14:14:32.907-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.909-0400 m31200| 2015-07-09T14:14:32.907-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.909-0400 m31100| 2015-07-09T14:14:32.907-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.909-0400 m31102| 2015-07-09T14:14:32.907-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.910-0400 m31202| 2015-07-09T14:14:32.907-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.910-0400 m31202| 2015-07-09T14:14:32.907-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.911-0400 m31101| 2015-07-09T14:14:32.910-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.913-0400 m31101| 2015-07-09T14:14:32.912-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.914-0400 m31102| 2015-07-09T14:14:32.913-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.919-0400 m31202| 2015-07-09T14:14:32.917-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.919-0400 m31200| 2015-07-09T14:14:32.917-0400 I INDEX [conn38] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.919-0400 m31200| 2015-07-09T14:14:32.918-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.920-0400 m31201| 2015-07-09T14:14:32.919-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.920-0400 m31201| 2015-07-09T14:14:32.920-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.930-0400 m31202| 2015-07-09T14:14:32.929-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.930-0400 m31101| 2015-07-09T14:14:32.929-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.931-0400 m31202| 2015-07-09T14:14:32.929-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.931-0400 m31101| 2015-07-09T14:14:32.929-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.937-0400 m31102| 2015-07-09T14:14:32.935-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.938-0400 m31102| 2015-07-09T14:14:32.935-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.938-0400 m31100| 2015-07-09T14:14:32.935-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.938-0400 m31100| 2015-07-09T14:14:32.935-0400 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.939-0400 m31200| 2015-07-09T14:14:32.936-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.939-0400 m31200| 2015-07-09T14:14:32.936-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.941-0400 m31202| 2015-07-09T14:14:32.940-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.941-0400 m31202| 2015-07-09T14:14:32.941-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.942-0400 m31201| 2015-07-09T14:14:32.942-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.944-0400 m31101| 2015-07-09T14:14:32.943-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.945-0400 m31102| 2015-07-09T14:14:32.945-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.947-0400 m31100| 2015-07-09T14:14:32.946-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.948-0400 m31100| 2015-07-09T14:14:32.947-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.949-0400 m31200| 2015-07-09T14:14:32.949-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.958-0400 m31100| 2015-07-09T14:14:32.957-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.959-0400 m31201| 2015-07-09T14:14:32.957-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.959-0400 m31100| 2015-07-09T14:14:32.957-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.960-0400 m31201| 2015-07-09T14:14:32.957-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.960-0400 m31101| 2015-07-09T14:14:32.958-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.960-0400 m31101| 2015-07-09T14:14:32.958-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.961-0400 m31200| 2015-07-09T14:14:32.958-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.961-0400 m31200| 2015-07-09T14:14:32.958-0400 I INDEX [conn80] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.962-0400 m31102| 2015-07-09T14:14:32.958-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.962-0400 m31102| 2015-07-09T14:14:32.958-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.963-0400 m31202| 2015-07-09T14:14:32.961-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.963-0400 m31202| 2015-07-09T14:14:32.961-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.970-0400 m31100| 2015-07-09T14:14:32.968-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.970-0400 m31201| 2015-07-09T14:14:32.968-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.970-0400 m31102| 2015-07-09T14:14:32.968-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.971-0400 m31200| 2015-07-09T14:14:32.968-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.971-0400 m31101| 2015-07-09T14:14:32.968-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.972-0400 m31201| 2015-07-09T14:14:32.971-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.973-0400 m31202| 2015-07-09T14:14:32.972-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.973-0400 m31101| 2015-07-09T14:14:32.973-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.977-0400 m31102| 2015-07-09T14:14:32.976-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.979-0400 m31100| 2015-07-09T14:14:32.977-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.979-0400 m31100| 2015-07-09T14:14:32.977-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.980-0400 m31100| 2015-07-09T14:14:32.979-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63686 #169 (100 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.981-0400 m31201| 2015-07-09T14:14:32.981-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.982-0400 m31201| 2015-07-09T14:14:32.981-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.984-0400 m31200| 2015-07-09T14:14:32.983-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.985-0400 m31200| 2015-07-09T14:14:32.984-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.987-0400 m31101| 2015-07-09T14:14:32.986-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.987-0400 m31202| 2015-07-09T14:14:32.986-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.988-0400 m31202| 2015-07-09T14:14:32.987-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.988-0400 m31101| 2015-07-09T14:14:32.987-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.990-0400 m31100| 2015-07-09T14:14:32.989-0400 I INDEX [conn49] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.990-0400 m31102| 2015-07-09T14:14:32.990-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.994-0400 m31102| 2015-07-09T14:14:32.990-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.995-0400 m31100| 2015-07-09T14:14:32.990-0400 I COMMAND [conn49] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 85504 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 106ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.996-0400 m31200| 2015-07-09T14:14:32.992-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.996-0400 m31200| 2015-07-09T14:14:32.994-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 86690 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:32.997-0400 m31201| 2015-07-09T14:14:32.994-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.000-0400 m31101| 2015-07-09T14:14:32.999-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.003-0400 m31202| 2015-07-09T14:14:33.003-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.004-0400 m31100| 2015-07-09T14:14:33.003-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.004-0400 m31100| 2015-07-09T14:14:33.003-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.009-0400 m31102| 2015-07-09T14:14:33.009-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.010-0400 m31201| 2015-07-09T14:14:33.009-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.011-0400 m31201| 2015-07-09T14:14:33.009-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.015-0400 m31200| 2015-07-09T14:14:33.014-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.015-0400 m31200| 2015-07-09T14:14:33.014-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.016-0400 m31202| 2015-07-09T14:14:33.015-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.017-0400 m31202| 2015-07-09T14:14:33.015-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.017-0400 m31100| 2015-07-09T14:14:33.015-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.018-0400 m31100| 2015-07-09T14:14:33.016-0400 I COMMAND [conn45] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 103231 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.020-0400 m31101| 2015-07-09T14:14:33.020-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.020-0400 m31101| 2015-07-09T14:14:33.020-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.023-0400 m31201| 2015-07-09T14:14:33.021-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.027-0400 m31102| 2015-07-09T14:14:33.026-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.028-0400 m31102| 2015-07-09T14:14:33.026-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.036-0400 m31202| 2015-07-09T14:14:33.035-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.036-0400 m31200| 2015-07-09T14:14:33.035-0400 I INDEX [conn30] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.036-0400 m31100| 2015-07-09T14:14:33.035-0400 I INDEX [conn168] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.037-0400 m31100| 2015-07-09T14:14:33.035-0400 I INDEX [conn168] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.038-0400 m31101| 2015-07-09T14:14:33.036-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.038-0400 m31200| 2015-07-09T14:14:33.036-0400 I COMMAND [conn30] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo6: 1.0 }, name: "foo6_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 105759 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.041-0400 m31102| 2015-07-09T14:14:33.041-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.042-0400 m31201| 2015-07-09T14:14:33.041-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.042-0400 m31201| 2015-07-09T14:14:33.041-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.050-0400 m31100| 2015-07-09T14:14:33.049-0400 I INDEX [conn168] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.052-0400 m31100| 2015-07-09T14:14:33.051-0400 I COMMAND [conn168] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 128582 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.053-0400 m31200| 2015-07-09T14:14:33.050-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.053-0400 m31200| 2015-07-09T14:14:33.050-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.054-0400 m31101| 2015-07-09T14:14:33.052-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.054-0400 m31101| 2015-07-09T14:14:33.052-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.055-0400 m31202| 2015-07-09T14:14:33.053-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.055-0400 m31202| 2015-07-09T14:14:33.053-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.057-0400 m31201| 2015-07-09T14:14:33.054-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.059-0400 m31100| 2015-07-09T14:14:33.058-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.060-0400 m31100| 2015-07-09T14:14:33.058-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.061-0400 m31102| 2015-07-09T14:14:33.061-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.062-0400 m31102| 2015-07-09T14:14:33.061-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.069-0400 m31200| 2015-07-09T14:14:33.068-0400 I INDEX [conn137] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.071-0400 m31200| 2015-07-09T14:14:33.070-0400 I COMMAND [conn137] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 146382 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 180ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.072-0400 m31101| 2015-07-09T14:14:33.072-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.075-0400 m31102| 2015-07-09T14:14:33.074-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.076-0400 m31202| 2015-07-09T14:14:33.074-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.077-0400 m31201| 2015-07-09T14:14:33.075-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.078-0400 m31201| 2015-07-09T14:14:33.075-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.079-0400 m31100| 2015-07-09T14:14:33.078-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.080-0400 m31100| 2015-07-09T14:14:33.079-0400 I COMMAND [conn57] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 160384 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 188ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.083-0400 m31101| 2015-07-09T14:14:33.083-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.084-0400 m31101| 2015-07-09T14:14:33.083-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.090-0400 m31102| 2015-07-09T14:14:33.089-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.090-0400 m31102| 2015-07-09T14:14:33.089-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.090-0400 m31202| 2015-07-09T14:14:33.089-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.090-0400 m31202| 2015-07-09T14:14:33.089-0400 I INDEX [repl writer 
worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.091-0400 m31200| 2015-07-09T14:14:33.089-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.091-0400 m31200| 2015-07-09T14:14:33.089-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.095-0400 m31100| 2015-07-09T14:14:33.094-0400 I INDEX [conn56] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.095-0400 m31100| 2015-07-09T14:14:33.094-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.096-0400 m31201| 2015-07-09T14:14:33.094-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.098-0400 m31101| 2015-07-09T14:14:33.097-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.101-0400 m31200| 2015-07-09T14:14:33.101-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.101-0400 m31202| 2015-07-09T14:14:33.101-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.103-0400 m31200| 2015-07-09T14:14:33.103-0400 I COMMAND [conn81] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 178893 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 211ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.103-0400 m31200| 2015-07-09T14:14:33.103-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.106-0400 m31102| 2015-07-09T14:14:33.104-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.107-0400 m31200| 2015-07-09T14:14:33.104-0400 I COMMAND [conn65] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo5: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 189136 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 190ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.115-0400 m31100| 2015-07-09T14:14:33.114-0400 I INDEX [conn56] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.116-0400 m31202| 2015-07-09T14:14:33.115-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.116-0400 m31202| 2015-07-09T14:14:33.115-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.118-0400 m31100| 2015-07-09T14:14:33.115-0400 I COMMAND [conn56] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 187360 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 223ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.120-0400 m31100| 2015-07-09T14:14:33.115-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.121-0400 m31201| 2015-07-09T14:14:33.119-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.121-0400 m31201| 2015-07-09T14:14:33.119-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.122-0400 m31101| 2015-07-09T14:14:33.119-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.122-0400 m30998| 2015-07-09T14:14:33.119-0400 I SHARDING [conn304] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.122-0400 m31101| 2015-07-09T14:14:33.119-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.123-0400 m30998| 2015-07-09T14:14:33.120-0400 I SHARDING [conn304] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.123-0400 m31102| 2015-07-09T14:14:33.119-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.123-0400 m31102| 2015-07-09T14:14:33.119-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.126-0400 m31100| 2015-07-09T14:14:33.119-0400 I COMMAND [conn35] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo5: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 202001 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 205ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.126-0400 m31100| 2015-07-09T14:14:33.120-0400 I NETWORK [conn168] end connection 127.0.0.1:63685 (99 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.127-0400 m31100| 2015-07-09T14:14:33.121-0400 I COMMAND [conn47] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 148552 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 150ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.127-0400 m31200| 2015-07-09T14:14:33.122-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.128-0400 m31200| 2015-07-09T14:14:33.123-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.131-0400 m31100| 2015-07-09T14:14:33.124-0400 I COMMAND [conn49] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 127478 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.132-0400 m31100| 2015-07-09T14:14:33.124-0400 I COMMAND [conn58] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 173486 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.132-0400 m31202| 2015-07-09T14:14:33.128-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.132-0400 m31202| 2015-07-09T14:14:33.130-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.133-0400 m31101| 2015-07-09T14:14:33.133-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.135-0400 m31102| 2015-07-09T14:14:33.133-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.141-0400 m31201| 2015-07-09T14:14:33.140-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.145-0400 m31100| 2015-07-09T14:14:33.143-0400 I INDEX [conn169] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.145-0400 m31100| 2015-07-09T14:14:33.144-0400 I INDEX [conn169] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.146-0400 m31102| 2015-07-09T14:14:33.146-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.147-0400 m31102| 2015-07-09T14:14:33.146-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.148-0400 m31200| 2015-07-09T14:14:33.147-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.150-0400 m31200| 2015-07-09T14:14:33.149-0400 I COMMAND [conn19] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 120545 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.150-0400 m31101| 2015-07-09T14:14:33.149-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.150-0400 m31101| 2015-07-09T14:14:33.149-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.159-0400 m31102| 2015-07-09T14:14:33.158-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.161-0400 m31201| 2015-07-09T14:14:33.161-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.162-0400 m31201| 2015-07-09T14:14:33.161-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.163-0400 m31200| 2015-07-09T14:14:33.162-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.164-0400 m31200| 2015-07-09T14:14:33.162-0400 I INDEX [conn80] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.164-0400 m31100| 2015-07-09T14:14:33.162-0400 I INDEX [conn169] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.166-0400 m31100| 2015-07-09T14:14:33.163-0400 I COMMAND [conn169] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 140234 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.166-0400 m31102| 2015-07-09T14:14:33.164-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.174-0400 m31202| 2015-07-09T14:14:33.173-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.174-0400 m31202| 2015-07-09T14:14:33.173-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.174-0400 m31101| 2015-07-09T14:14:33.173-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.175-0400 m31201| 2015-07-09T14:14:33.174-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.176-0400 m31101| 2015-07-09T14:14:33.175-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.178-0400 m31201| 2015-07-09T14:14:33.177-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.178-0400 m31200| 2015-07-09T14:14:33.177-0400 I INDEX [conn80] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.179-0400 m31200| 2015-07-09T14:14:33.178-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.181-0400 m31200| 2015-07-09T14:14:33.178-0400 I COMMAND [conn80] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 99335 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.182-0400 m31102| 2015-07-09T14:14:33.180-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.182-0400 m31102| 2015-07-09T14:14:33.180-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.183-0400 m31100| 2015-07-09T14:14:33.180-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.184-0400 m31100| 2015-07-09T14:14:33.180-0400 I INDEX [conn73] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.184-0400 m31200| 2015-07-09T14:14:33.181-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.184-0400 m31202| 2015-07-09T14:14:33.182-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.185-0400 m31200| 2015-07-09T14:14:33.182-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.187-0400 m31102| 2015-07-09T14:14:33.187-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.188-0400 m31100| 2015-07-09T14:14:33.187-0400 I INDEX [conn73] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.189-0400 m31101| 2015-07-09T14:14:33.187-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.189-0400 m31101| 2015-07-09T14:14:33.187-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.190-0400 m31100| 2015-07-09T14:14:33.189-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.190-0400 m31100| 2015-07-09T14:14:33.189-0400 I COMMAND [conn73] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 112932 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.194-0400 m31201| 2015-07-09T14:14:33.193-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.195-0400 m31201| 2015-07-09T14:14:33.194-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.195-0400 m30999| 2015-07-09T14:14:33.194-0400 I SHARDING [conn306] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.195-0400 m30999| 2015-07-09T14:14:33.194-0400 I SHARDING [conn306] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.195-0400 m31100| 2015-07-09T14:14:33.194-0400 I NETWORK [conn169] end connection 127.0.0.1:63686 (98 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.196-0400 m31200| 2015-07-09T14:14:33.196-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.197-0400 m31202| 2015-07-09T14:14:33.196-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.197-0400 m31202| 2015-07-09T14:14:33.196-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.199-0400 m31100| 2015-07-09T14:14:33.199-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.202-0400 m31200| 2015-07-09T14:14:33.202-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.203-0400 m31102| 2015-07-09T14:14:33.203-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.203-0400 m31102| 2015-07-09T14:14:33.203-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.206-0400 m31101| 
2015-07-09T14:14:33.204-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.206-0400 m31100| 2015-07-09T14:14:33.205-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.208-0400 m31201| 2015-07-09T14:14:33.207-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.210-0400 m31200| 2015-07-09T14:14:33.209-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.210-0400 m31100| 2015-07-09T14:14:33.209-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.211-0400 m31102| 2015-07-09T14:14:33.209-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.213-0400 m31102| 2015-07-09T14:14:33.212-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.214-0400 m31100| 2015-07-09T14:14:33.213-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.217-0400 m31200| 2015-07-09T14:14:33.216-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.217-0400 m31202| 2015-07-09T14:14:33.216-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.218-0400 m31102| 2015-07-09T14:14:33.217-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.219-0400 m31100| 2015-07-09T14:14:33.219-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.219-0400 m31101| 2015-07-09T14:14:33.219-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.219-0400 m31101| 2015-07-09T14:14:33.219-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.219-0400 m31200| 2015-07-09T14:14:33.219-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.222-0400 m31102| 2015-07-09T14:14:33.222-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.223-0400 m31202| 2015-07-09T14:14:33.222-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.223-0400 m31201| 2015-07-09T14:14:33.223-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.224-0400 m31201| 2015-07-09T14:14:33.223-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.224-0400 m31100| 2015-07-09T14:14:33.223-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.224-0400 m31100| 2015-07-09T14:14:33.224-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:33.226-0400 m31101| 2015-07-09T14:14:33.225-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.227-0400 m31101| 2015-07-09T14:14:33.226-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.228-0400 m31202| 2015-07-09T14:14:33.227-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.228-0400 m31102| 2015-07-09T14:14:33.227-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.231-0400 m31200| 2015-07-09T14:14:33.228-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.231-0400 m31200| 2015-07-09T14:14:33.229-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.231-0400 m31102| 2015-07-09T14:14:33.229-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.231-0400 m31102| 2015-07-09T14:14:33.231-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.232-0400 m31101| 2015-07-09T14:14:33.231-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.233-0400 m31201| 2015-07-09T14:14:33.232-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.233-0400 m31202| 2015-07-09T14:14:33.232-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.234-0400 m31102| 2015-07-09T14:14:33.233-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.234-0400 m31201| 2015-07-09T14:14:33.234-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.234-0400 m31101| 2015-07-09T14:14:33.234-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.236-0400 m31100| 2015-07-09T14:14:33.235-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.236-0400 m31100| 2015-07-09T14:14:33.235-0400 I INDEX [conn73] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.237-0400 m31201| 2015-07-09T14:14:33.236-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.237-0400 m31102| 2015-07-09T14:14:33.236-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.238-0400 m31202| 2015-07-09T14:14:33.237-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.240-0400 m31200| 2015-07-09T14:14:33.239-0400 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.240-0400 m31202| 2015-07-09T14:14:33.240-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.240-0400 m31101| 2015-07-09T14:14:33.240-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.241-0400 m31201| 2015-07-09T14:14:33.240-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.241-0400 m31202| 2015-07-09T14:14:33.241-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.242-0400 m31201| 2015-07-09T14:14:33.241-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.243-0400 m31201| 2015-07-09T14:14:33.242-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.243-0400 m31202| 2015-07-09T14:14:33.243-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.244-0400 m31202| 2015-07-09T14:14:33.244-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.245-0400 m31101| 2015-07-09T14:14:33.245-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.246-0400 m31201| 2015-07-09T14:14:33.245-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.246-0400 m31100| 2015-07-09T14:14:33.246-0400 I INDEX [conn73] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.247-0400 m31101| 2015-07-09T14:14:33.247-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.247-0400 m31201| 2015-07-09T14:14:33.247-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.249-0400 m31202| 2015-07-09T14:14:33.248-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.249-0400 m31202| 2015-07-09T14:14:33.248-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.249-0400 m31200| 2015-07-09T14:14:33.248-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.249-0400 m31100| 2015-07-09T14:14:33.248-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.251-0400 m31101| 2015-07-09T14:14:33.249-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.252-0400 m31100| 2015-07-09T14:14:33.249-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.252-0400 m31200| 2015-07-09T14:14:33.250-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.252-0400 m31101| 2015-07-09T14:14:33.251-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.253-0400 m31201| 2015-07-09T14:14:33.252-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.253-0400 m31202| 2015-07-09T14:14:33.253-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.255-0400 m31102| 2015-07-09T14:14:33.253-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.255-0400 m31102| 2015-07-09T14:14:33.253-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.256-0400 m31202| 2015-07-09T14:14:33.255-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.261-0400 m31101| 2015-07-09T14:14:33.261-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.262-0400 m31101| 2015-07-09T14:14:33.261-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.262-0400 m31202| 2015-07-09T14:14:33.261-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.262-0400 m31201| 2015-07-09T14:14:33.262-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.263-0400 m31201| 2015-07-09T14:14:33.262-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.264-0400 m31102| 2015-07-09T14:14:33.263-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.264-0400 m31102| 2015-07-09T14:14:33.264-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.267-0400 m31102| 2015-07-09T14:14:33.266-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.268-0400 m31201| 2015-07-09T14:14:33.268-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.269-0400 m31201| 2015-07-09T14:14:33.269-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.270-0400 m31201| 2015-07-09T14:14:33.270-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.271-0400 m31101| 2015-07-09T14:14:33.271-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.272-0400 m31101| 2015-07-09T14:14:33.272-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.273-0400 m31101| 2015-07-09T14:14:33.273-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.302-0400 m31100| 2015-07-09T14:14:33.302-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.303-0400 m31200| 2015-07-09T14:14:33.301-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.303-0400 m31100| 2015-07-09T14:14:33.302-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.304-0400 m31200| 2015-07-09T14:14:33.302-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.309-0400 m31200| 2015-07-09T14:14:33.309-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.318-0400 m31200| 2015-07-09T14:14:33.317-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.318-0400 m31100| 2015-07-09T14:14:33.317-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.319-0400 m31200| 2015-07-09T14:14:33.317-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.322-0400 m31201| 2015-07-09T14:14:33.322-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.323-0400 m31201| 2015-07-09T14:14:33.322-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.323-0400 m31202| 2015-07-09T14:14:33.322-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.324-0400 m31202| 2015-07-09T14:14:33.322-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.334-0400 m31200| 2015-07-09T14:14:33.333-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.334-0400 m31100| 2015-07-09T14:14:33.333-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.335-0400 m31100| 2015-07-09T14:14:33.334-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.341-0400 m31202| 2015-07-09T14:14:33.340-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.341-0400 m31101| 2015-07-09T14:14:33.341-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.342-0400 m31102| 2015-07-09T14:14:33.341-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.342-0400 m31101| 2015-07-09T14:14:33.341-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.343-0400 m31102| 2015-07-09T14:14:33.341-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.350-0400 m31100| 2015-07-09T14:14:33.348-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.350-0400 m31201| 2015-07-09T14:14:33.348-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.353-0400 m31100| 2015-07-09T14:14:33.352-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63687 #170 (99 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.354-0400 m31200| 2015-07-09T14:14:33.354-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.355-0400 m31200| 2015-07-09T14:14:33.354-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.355-0400 m31100| 2015-07-09T14:14:33.355-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63688 #171 (100 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.358-0400 m31101| 2015-07-09T14:14:33.357-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.364-0400 m31102| 2015-07-09T14:14:33.363-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.365-0400 m31202| 2015-07-09T14:14:33.363-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.366-0400 m31202| 2015-07-09T14:14:33.363-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.372-0400 m31201| 2015-07-09T14:14:33.371-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.372-0400 m31100| 2015-07-09T14:14:33.371-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.372-0400 m31201| 2015-07-09T14:14:33.371-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.373-0400 m31100| 2015-07-09T14:14:33.371-0400 I INDEX [conn73] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.374-0400 m31101| 2015-07-09T14:14:33.373-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.374-0400 m31101| 2015-07-09T14:14:33.373-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.378-0400 m31202| 2015-07-09T14:14:33.377-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.379-0400 m31200| 2015-07-09T14:14:33.378-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.382-0400 m31201| 2015-07-09T14:14:33.381-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.382-0400 m31102| 2015-07-09T14:14:33.382-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.382-0400 m31102| 2015-07-09T14:14:33.382-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.386-0400 m31100| 2015-07-09T14:14:33.386-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.387-0400 m31101| 2015-07-09T14:14:33.386-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.397-0400 m31102| 2015-07-09T14:14:33.394-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.398-0400 m31200| 2015-07-09T14:14:33.394-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.398-0400 m31200| 2015-07-09T14:14:33.395-0400 I INDEX [conn80] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.398-0400 m31202| 2015-07-09T14:14:33.396-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.398-0400 m31202| 2015-07-09T14:14:33.396-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.398-0400 m31201| 2015-07-09T14:14:33.397-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.399-0400 m31201| 2015-07-09T14:14:33.397-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.399-0400 m31101| 2015-07-09T14:14:33.398-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.399-0400 m31101| 2015-07-09T14:14:33.398-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.403-0400 m31100| 2015-07-09T14:14:33.402-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.403-0400 m31100| 2015-07-09T14:14:33.402-0400 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.406-0400 m31200| 2015-07-09T14:14:33.405-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.410-0400 m31202| 2015-07-09T14:14:33.410-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.413-0400 m31201| 2015-07-09T14:14:33.411-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.414-0400 m31101| 2015-07-09T14:14:33.413-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.419-0400 m31200| 2015-07-09T14:14:33.417-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.419-0400 m31200| 2015-07-09T14:14:33.417-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.420-0400 m31100| 2015-07-09T14:14:33.417-0400 I INDEX [conn58] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.421-0400 m31202| 2015-07-09T14:14:33.420-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.422-0400 m31202| 2015-07-09T14:14:33.420-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.422-0400 m31102| 2015-07-09T14:14:33.421-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.423-0400 m31102| 2015-07-09T14:14:33.421-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.423-0400 m31201| 2015-07-09T14:14:33.423-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.423-0400 m31201| 2015-07-09T14:14:33.423-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.432-0400 m31101| 2015-07-09T14:14:33.432-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.432-0400 m31101| 2015-07-09T14:14:33.432-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.433-0400 m31102| 2015-07-09T14:14:33.433-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.435-0400 m31100| 2015-07-09T14:14:33.435-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.435-0400 m31100| 2015-07-09T14:14:33.435-0400 I INDEX [conn50] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.436-0400 m31200| 2015-07-09T14:14:33.435-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.436-0400 m31200| 2015-07-09T14:14:33.436-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 85535 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.437-0400 m31201| 2015-07-09T14:14:33.437-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.450-0400 m31100| 2015-07-09T14:14:33.449-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.451-0400 m31101| 2015-07-09T14:14:33.449-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.451-0400 m31202| 2015-07-09T14:14:33.449-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.452-0400 m31102| 2015-07-09T14:14:33.449-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.452-0400 m31102| 2015-07-09T14:14:33.449-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.453-0400 m31200| 2015-07-09T14:14:33.450-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.453-0400 m31200| 2015-07-09T14:14:33.450-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.454-0400 m31100| 2015-07-09T14:14:33.451-0400 I COMMAND [conn50] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 98289 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.457-0400 m31202| 2015-07-09T14:14:33.457-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.458-0400 m31202| 2015-07-09T14:14:33.457-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.458-0400 m31201| 2015-07-09T14:14:33.457-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.458-0400 m31201| 2015-07-09T14:14:33.457-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.462-0400 m31102| 2015-07-09T14:14:33.461-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.462-0400 m31200| 2015-07-09T14:14:33.461-0400 I INDEX [conn19] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.464-0400 m31101| 2015-07-09T14:14:33.463-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.465-0400 m31101| 2015-07-09T14:14:33.463-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.465-0400 m31200| 2015-07-09T14:14:33.464-0400 I COMMAND [conn19] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 110412 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.466-0400 m31100| 2015-07-09T14:14:33.464-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.466-0400 m31100| 2015-07-09T14:14:33.464-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.468-0400 m31201| 2015-07-09T14:14:33.467-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.470-0400 m31102| 2015-07-09T14:14:33.469-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.470-0400 m31102| 2015-07-09T14:14:33.469-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.473-0400 m31202| 2015-07-09T14:14:33.472-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.474-0400 m31101| 2015-07-09T14:14:33.473-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.479-0400 m31100| 2015-07-09T14:14:33.478-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.482-0400 m31100| 2015-07-09T14:14:33.479-0400 I COMMAND [conn45] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo0: 1.0 }, name: "foo0_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 126468 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 154ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.482-0400 m31102| 2015-07-09T14:14:33.481-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.485-0400 m31200| 2015-07-09T14:14:33.481-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.486-0400 m31200| 2015-07-09T14:14:33.481-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.486-0400 m31201| 2015-07-09T14:14:33.482-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.486-0400 m31201| 2015-07-09T14:14:33.482-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.486-0400 m31202| 2015-07-09T14:14:33.485-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.487-0400 m31202| 2015-07-09T14:14:33.485-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.492-0400 m31100| 2015-07-09T14:14:33.492-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.494-0400 m31100| 2015-07-09T14:14:33.492-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.496-0400 m31102| 2015-07-09T14:14:33.496-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.497-0400 m31102| 2015-07-09T14:14:33.496-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.501-0400 m31201| 2015-07-09T14:14:33.501-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.502-0400 m31101| 2015-07-09T14:14:33.501-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.502-0400 m31101| 2015-07-09T14:14:33.501-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.503-0400 m31200| 2015-07-09T14:14:33.501-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.504-0400 m31202| 2015-07-09T14:14:33.502-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.505-0400 m31200| 2015-07-09T14:14:33.503-0400 I COMMAND [conn60] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 137700 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.505-0400 m31100| 2015-07-09T14:14:33.505-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.507-0400 m31100| 2015-07-09T14:14:33.505-0400 I COMMAND [conn49] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 152864 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.511-0400 m31101| 2015-07-09T14:14:33.511-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.514-0400 m31200| 2015-07-09T14:14:33.514-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.514-0400 m31200| 2015-07-09T14:14:33.514-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.515-0400 m31201| 2015-07-09T14:14:33.514-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.515-0400 m31201| 2015-07-09T14:14:33.514-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.518-0400 m31102| 2015-07-09T14:14:33.518-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.520-0400 m31202| 2015-07-09T14:14:33.518-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.520-0400 m31100| 2015-07-09T14:14:33.518-0400 I INDEX [conn56] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.521-0400 m31202| 2015-07-09T14:14:33.518-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.521-0400 m31100| 2015-07-09T14:14:33.518-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.525-0400 m31101| 2015-07-09T14:14:33.524-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.525-0400 m31101| 2015-07-09T14:14:33.524-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.527-0400 m31200| 2015-07-09T14:14:33.524-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.528-0400 m31200| 2015-07-09T14:14:33.525-0400 I COMMAND [conn137] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo2: 1.0 }, name: "foo2_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 171381 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 193ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.533-0400 m31201| 2015-07-09T14:14:33.532-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.534-0400 m31202| 2015-07-09T14:14:33.533-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.534-0400 m31100| 2015-07-09T14:14:33.533-0400 I INDEX [conn56] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.534-0400 m31102| 2015-07-09T14:14:33.533-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.534-0400 m31102| 2015-07-09T14:14:33.534-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.538-0400 m31100| 2015-07-09T14:14:33.536-0400 I COMMAND [conn56] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo2: 1.0 }, name: "foo2_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 173953 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 204ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.542-0400 m31200| 2015-07-09T14:14:33.542-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.542-0400 m31200| 2015-07-09T14:14:33.542-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.544-0400 m31101| 2015-07-09T14:14:33.544-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.546-0400 m31201| 2015-07-09T14:14:33.546-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.546-0400 m31201| 2015-07-09T14:14:33.546-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.546-0400 m31102| 2015-07-09T14:14:33.546-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.551-0400 m31202| 2015-07-09T14:14:33.551-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.552-0400 m31202| 2015-07-09T14:14:33.551-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.555-0400 m31100| 2015-07-09T14:14:33.554-0400 I INDEX [conn170] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.555-0400 m31100| 2015-07-09T14:14:33.555-0400 I INDEX [conn170] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.562-0400 m31200| 2015-07-09T14:14:33.561-0400 I INDEX [conn81] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.563-0400 m31200| 2015-07-09T14:14:33.562-0400 I COMMAND [conn81] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 171214 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 207ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.564-0400 m31201| 2015-07-09T14:14:33.561-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.572-0400 m31102| 2015-07-09T14:14:33.570-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.575-0400 m31102| 2015-07-09T14:14:33.570-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.576-0400 m31101| 2015-07-09T14:14:33.570-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.576-0400 m31101| 2015-07-09T14:14:33.570-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.577-0400 m31100| 2015-07-09T14:14:33.572-0400 I INDEX [conn170] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.578-0400 m31100| 2015-07-09T14:14:33.572-0400 I COMMAND [conn170] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 180984 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 217ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.589-0400 m31202| 2015-07-09T14:14:33.587-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.589-0400 m31200| 2015-07-09T14:14:33.588-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.589-0400 m31200| 2015-07-09T14:14:33.588-0400 I INDEX [conn28] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.590-0400 m31102| 2015-07-09T14:14:33.589-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.590-0400 m31201| 2015-07-09T14:14:33.589-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.590-0400 m31201| 2015-07-09T14:14:33.589-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.594-0400 m31101| 2015-07-09T14:14:33.594-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.594-0400 m31100| 2015-07-09T14:14:33.594-0400 I INDEX [conn171] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.595-0400 m31100| 2015-07-09T14:14:33.594-0400 I INDEX [conn171] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.598-0400 m31202| 2015-07-09T14:14:33.597-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.599-0400 m31202| 2015-07-09T14:14:33.597-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.603-0400 m31200| 2015-07-09T14:14:33.602-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.603-0400 m31201| 2015-07-09T14:14:33.602-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.604-0400 m31101| 2015-07-09T14:14:33.602-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.604-0400 m31101| 2015-07-09T14:14:33.602-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.604-0400 m31102| 2015-07-09T14:14:33.602-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.604-0400 m31102| 2015-07-09T14:14:33.603-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.606-0400 m31200| 2015-07-09T14:14:33.604-0400 I COMMAND [conn28] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 205424 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 247ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.607-0400 m31200| 2015-07-09T14:14:33.604-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.610-0400 m31202| 2015-07-09T14:14:33.608-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.611-0400 m31100| 2015-07-09T14:14:33.608-0400 I INDEX [conn171] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.611-0400 m31100| 2015-07-09T14:14:33.610-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.612-0400 m31100| 2015-07-09T14:14:33.610-0400 I COMMAND [conn171] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 215755 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 252ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.613-0400 m31200| 2015-07-09T14:14:33.608-0400 I COMMAND [conn64] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo6: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 183172 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 187ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.613-0400 m31200| 2015-07-09T14:14:33.608-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.614-0400 m31101| 2015-07-09T14:14:33.611-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.614-0400 m31200| 2015-07-09T14:14:33.613-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.615-0400 m31200| 2015-07-09T14:14:33.613-0400 I COMMAND [conn47] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo9: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 187429 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 192ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.616-0400 m31200| 2015-07-09T14:14:33.614-0400 I COMMAND [conn34] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo7: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 191966 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 193ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.616-0400 m31200| 2015-07-09T14:14:33.615-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.616-0400 m31100| 2015-07-09T14:14:33.614-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.617-0400 m31100| 2015-07-09T14:14:33.614-0400 I COMMAND [conn37] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo6: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 189282 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 193ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.620-0400 m31102| 2015-07-09T14:14:33.619-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.620-0400 m31100| 2015-07-09T14:14:33.619-0400 I COMMAND [conn39] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo9: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 193469 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.620-0400 m31100| 2015-07-09T14:14:33.619-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.621-0400 m31202| 2015-07-09T14:14:33.619-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.621-0400 m31202| 2015-07-09T14:14:33.619-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.625-0400 m31200| 2015-07-09T14:14:33.624-0400 I COMMAND [conn63] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo0: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 132468 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.625-0400 m31200| 2015-07-09T14:14:33.624-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.628-0400 m31100| 2015-07-09T14:14:33.625-0400 I COMMAND [conn32] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo7: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 197641 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 204ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.629-0400 m31100| 2015-07-09T14:14:33.626-0400 I COMMAND [conn47] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 203459 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 203ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.630-0400 m31100| 2015-07-09T14:14:33.626-0400 I COMMAND [conn49] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 119060 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.630-0400 m31100| 2015-07-09T14:14:33.626-0400 I COMMAND [conn50] command db49.coll49 command: listIndexes { listIndexes: 
"coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 173431 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.631-0400 m31201| 2015-07-09T14:14:33.627-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.631-0400 m31201| 2015-07-09T14:14:33.627-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.632-0400 m30999| 2015-07-09T14:14:33.627-0400 I SHARDING [conn306] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.633-0400 m30999| 2015-07-09T14:14:33.627-0400 I SHARDING [conn306] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.634-0400 m31100| 2015-07-09T14:14:33.628-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.634-0400 m31100| 2015-07-09T14:14:33.629-0400 I NETWORK [conn170] end connection 127.0.0.1:63687 (99 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.634-0400 m31101| 2015-07-09T14:14:33.628-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.634-0400 m31101| 2015-07-09T14:14:33.629-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.634-0400 m31200| 2015-07-09T14:14:33.630-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.634-0400 m31102| 2015-07-09T14:14:33.633-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.635-0400 m31102| 2015-07-09T14:14:33.633-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.638-0400 m31201| 2015-07-09T14:14:33.637-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.639-0400 m31200| 2015-07-09T14:14:33.638-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.639-0400 m31201| 2015-07-09T14:14:33.638-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.643-0400 m31100| 2015-07-09T14:14:33.639-0400 I COMMAND [conn34] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo0: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 146030 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 156ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.643-0400 m31100| 2015-07-09T14:14:33.639-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.643-0400 m31202| 2015-07-09T14:14:33.639-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.644-0400 m31202| 2015-07-09T14:14:33.640-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.645-0400 m31200| 2015-07-09T14:14:33.643-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.647-0400 m31101| 2015-07-09T14:14:33.642-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.647-0400 m31100| 2015-07-09T14:14:33.643-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.647-0400 m31101| 2015-07-09T14:14:33.644-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.648-0400 m31201| 2015-07-09T14:14:33.644-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.648-0400 m31102| 2015-07-09T14:14:33.645-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.648-0400 m31100| 2015-07-09T14:14:33.646-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.648-0400 m31202| 2015-07-09T14:14:33.646-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.648-0400 m31102| 2015-07-09T14:14:33.647-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.648-0400 m31100| 2015-07-09T14:14:33.647-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.649-0400 m31200| 2015-07-09T14:14:33.647-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.649-0400 m31101| 2015-07-09T14:14:33.648-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.649-0400 m31100| 2015-07-09T14:14:33.648-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.650-0400 m31201| 2015-07-09T14:14:33.649-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.650-0400 m31101| 2015-07-09T14:14:33.650-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.651-0400 m31202| 2015-07-09T14:14:33.650-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.651-0400 m31102| 2015-07-09T14:14:33.651-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.651-0400 m31101| 2015-07-09T14:14:33.651-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.652-0400 m31201| 2015-07-09T14:14:33.651-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.652-0400 m31202| 2015-07-09T14:14:33.652-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.653-0400 m31101| 2015-07-09T14:14:33.653-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.654-0400 m31202| 2015-07-09T14:14:33.653-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.654-0400 m31102| 2015-07-09T14:14:33.653-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.654-0400 m31201| 2015-07-09T14:14:33.654-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.654-0400 m31101| 2015-07-09T14:14:33.654-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.655-0400 m31102| 2015-07-09T14:14:33.655-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.656-0400 m31102| 2015-07-09T14:14:33.655-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.657-0400 m31201| 2015-07-09T14:14:33.656-0400 I COMMAND 
[repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.657-0400 m31101| 2015-07-09T14:14:33.656-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.657-0400 m31202| 2015-07-09T14:14:33.656-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.658-0400 m31200| 2015-07-09T14:14:33.657-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.658-0400 m31100| 2015-07-09T14:14:33.657-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.658-0400 m31202| 2015-07-09T14:14:33.658-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.659-0400 m31102| 2015-07-09T14:14:33.658-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.659-0400 m31201| 2015-07-09T14:14:33.659-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.659-0400 m31101| 2015-07-09T14:14:33.659-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.660-0400 m31102| 2015-07-09T14:14:33.659-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.660-0400 m31202| 2015-07-09T14:14:33.660-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.661-0400 m31101| 2015-07-09T14:14:33.660-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.661-0400 m31201| 2015-07-09T14:14:33.661-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.661-0400 m31102| 2015-07-09T14:14:33.661-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.662-0400 m31101| 2015-07-09T14:14:33.662-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.662-0400 m31202| 2015-07-09T14:14:33.662-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.663-0400 m31201| 2015-07-09T14:14:33.662-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.663-0400 m31102| 2015-07-09T14:14:33.663-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.664-0400 m31201| 2015-07-09T14:14:33.663-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.664-0400 m31202| 2015-07-09T14:14:33.663-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.664-0400 m31102| 2015-07-09T14:14:33.664-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.723-0400 m31200| 2015-07-09T14:14:33.722-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: 
"foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.723-0400 m31100| 2015-07-09T14:14:33.722-0400 I INDEX [conn56] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.724-0400 m31200| 2015-07-09T14:14:33.722-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.724-0400 m31100| 2015-07-09T14:14:33.722-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.731-0400 m31100| 2015-07-09T14:14:33.730-0400 I INDEX [conn56] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.733-0400 m31200| 2015-07-09T14:14:33.732-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.740-0400 m31101| 2015-07-09T14:14:33.739-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.740-0400 m31101| 2015-07-09T14:14:33.739-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.746-0400 m31202| 2015-07-09T14:14:33.745-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.746-0400 m31202| 2015-07-09T14:14:33.745-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.747-0400 m31200| 2015-07-09T14:14:33.745-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.747-0400 m31200| 2015-07-09T14:14:33.745-0400 I INDEX [conn28] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.748-0400 m31100| 2015-07-09T14:14:33.748-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.748-0400 m31100| 2015-07-09T14:14:33.748-0400 I INDEX [conn50] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.753-0400 m31101| 2015-07-09T14:14:33.753-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.753-0400 m31201| 2015-07-09T14:14:33.753-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.754-0400 m31201| 2015-07-09T14:14:33.753-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.762-0400 m31100| 2015-07-09T14:14:33.761-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63689 #172 (100 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.764-0400 m31202| 2015-07-09T14:14:33.764-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.765-0400 m31102| 2015-07-09T14:14:33.764-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.765-0400 m31102| 2015-07-09T14:14:33.764-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.767-0400 m31200| 2015-07-09T14:14:33.766-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.768-0400 m31201| 2015-07-09T14:14:33.767-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.772-0400 m31102| 2015-07-09T14:14:33.772-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.775-0400 m31100| 2015-07-09T14:14:33.775-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.776-0400 m31201| 2015-07-09T14:14:33.775-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.776-0400 m31201| 2015-07-09T14:14:33.775-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.781-0400 m31200| 2015-07-09T14:14:33.781-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.781-0400 m31200| 2015-07-09T14:14:33.781-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.782-0400 m31202| 2015-07-09T14:14:33.781-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.782-0400 m31202| 2015-07-09T14:14:33.781-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.790-0400 m31102| 2015-07-09T14:14:33.789-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.791-0400 m31102| 2015-07-09T14:14:33.789-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.791-0400 m31100| 2015-07-09T14:14:33.789-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.791-0400 m31100| 2015-07-09T14:14:33.789-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.796-0400 m31201| 2015-07-09T14:14:33.796-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.796-0400 m31101| 2015-07-09T14:14:33.796-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.797-0400 m31101| 2015-07-09T14:14:33.796-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.797-0400 m31202| 2015-07-09T14:14:33.797-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.799-0400 m31100| 2015-07-09T14:14:33.798-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.800-0400 m31102| 2015-07-09T14:14:33.798-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.800-0400 m31200| 2015-07-09T14:14:33.800-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.807-0400 m31101| 2015-07-09T14:14:33.807-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.812-0400 m31200| 2015-07-09T14:14:33.811-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.812-0400 m31200| 2015-07-09T14:14:33.811-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.814-0400 m31100| 2015-07-09T14:14:33.813-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.815-0400 m31201| 2015-07-09T14:14:33.813-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.815-0400 m31100| 2015-07-09T14:14:33.814-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.816-0400 m31201| 2015-07-09T14:14:33.814-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.816-0400 m31102| 2015-07-09T14:14:33.815-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.816-0400 m31102| 2015-07-09T14:14:33.815-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.816-0400 m31202| 2015-07-09T14:14:33.814-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.816-0400 m31202| 2015-07-09T14:14:33.814-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.820-0400 m31101| 2015-07-09T14:14:33.820-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } 
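
Note: the churn above is the concurrency (FSM) workload repeatedly creating, dropping, and listing single-field indexes foo0..foo9 on db49.coll49 through mongos; the shard primaries (m31100, m31200) execute the commands and their secondaries (m31101/m31102, m31201/m31202) replay them as "repl writer worker" threads. A minimal mongo-shell sketch of the three command shapes seen in these entries (collection, key, and index names are taken from the log; this is an illustration, not the workload source):

    // Switch to the database the workload uses in the entries above.
    var testDB = db.getSiblingDB("db49");
    // Build a single-field index; shape matches the createIndexes entries above.
    testDB.runCommand({ createIndexes: "coll49", indexes: [ { key: { foo5: 1 }, name: "foo5_1" } ] });
    // Drop it by key pattern ("deleteIndexes" is the legacy wire-level spelling of dropIndexes).
    testDB.runCommand({ deleteIndexes: "coll49", index: { foo5: 1 } });
    // List the remaining indexes two at a time, as the workload does with batchSize: 2.
    testDB.runCommand({ listIndexes: "coll49", cursor: { batchSize: 2 } });
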
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.821-0400 m31101| 2015-07-09T14:14:33.820-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.827-0400 m31202| 2015-07-09T14:14:33.826-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.827-0400 m31100| 2015-07-09T14:14:33.826-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.827-0400 m31200| 2015-07-09T14:14:33.826-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.830-0400 m31102| 2015-07-09T14:14:33.830-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.830-0400 m31201| 2015-07-09T14:14:33.829-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.834-0400 m31101| 2015-07-09T14:14:33.833-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.837-0400 m31100| 2015-07-09T14:14:33.837-0400 I INDEX [conn171] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.837-0400 m31100| 2015-07-09T14:14:33.837-0400 I INDEX [conn171] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.840-0400 m31102| 2015-07-09T14:14:33.839-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.840-0400 m31102| 2015-07-09T14:14:33.839-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.845-0400 m31202| 2015-07-09T14:14:33.844-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.845-0400 m31202| 2015-07-09T14:14:33.844-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.846-0400 m31200| 2015-07-09T14:14:33.844-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.846-0400 m31201| 2015-07-09T14:14:33.844-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.847-0400 m31200| 2015-07-09T14:14:33.844-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.847-0400 m31201| 2015-07-09T14:14:33.844-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.848-0400 m31101| 2015-07-09T14:14:33.846-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.848-0400 m31101| 
2015-07-09T14:14:33.846-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.848-0400 m31102| 2015-07-09T14:14:33.847-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.851-0400 m31200| 2015-07-09T14:14:33.850-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.853-0400 m31200| 2015-07-09T14:14:33.852-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 83869 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 106ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.855-0400 m31100| 2015-07-09T14:14:33.853-0400 I INDEX [conn171] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.855-0400 m31202| 2015-07-09T14:14:33.853-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.856-0400 m31100| 2015-07-09T14:14:33.854-0400 I COMMAND [conn171] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 81339 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.859-0400 m31101| 2015-07-09T14:14:33.859-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.861-0400 m31201| 2015-07-09T14:14:33.859-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.862-0400 m31100| 2015-07-09T14:14:33.860-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.862-0400 m31100| 2015-07-09T14:14:33.861-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.870-0400 m31200| 2015-07-09T14:14:33.869-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.870-0400 m31200| 2015-07-09T14:14:33.870-0400 I INDEX [conn80] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.872-0400 m31202| 2015-07-09T14:14:33.871-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.872-0400 m31202| 2015-07-09T14:14:33.871-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.874-0400 m31100| 2015-07-09T14:14:33.873-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.874-0400 m31102| 2015-07-09T14:14:33.873-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.875-0400 m31102| 2015-07-09T14:14:33.873-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.875-0400 m31101| 2015-07-09T14:14:33.873-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.876-0400 m31101| 2015-07-09T14:14:33.873-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.876-0400 m31201| 2015-07-09T14:14:33.874-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.877-0400 m31201| 2015-07-09T14:14:33.874-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.878-0400 m31100| 2015-07-09T14:14:33.874-0400 I COMMAND [conn45] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo2: 1.0 }, name: "foo2_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 105449 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.879-0400 m31200| 2015-07-09T14:14:33.879-0400 I INDEX [conn80] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.882-0400 m31200| 2015-07-09T14:14:33.880-0400 I COMMAND [conn80] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 102545 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.884-0400 m31102| 2015-07-09T14:14:33.883-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.885-0400 m31100| 2015-07-09T14:14:33.883-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.886-0400 m31100| 2015-07-09T14:14:33.883-0400 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.887-0400 m31202| 2015-07-09T14:14:33.884-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.888-0400 m31201| 2015-07-09T14:14:33.883-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.888-0400 m31101| 2015-07-09T14:14:33.884-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.890-0400 m31200| 2015-07-09T14:14:33.887-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.890-0400 m31200| 2015-07-09T14:14:33.887-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.898-0400 m31201| 2015-07-09T14:14:33.896-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.899-0400 m31201| 2015-07-09T14:14:33.896-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.899-0400 m31100| 2015-07-09T14:14:33.897-0400 I INDEX [conn58] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.900-0400 m31100| 2015-07-09T14:14:33.898-0400 I COMMAND [conn58] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 125690 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.902-0400 m31102| 2015-07-09T14:14:33.901-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.902-0400 m31102| 2015-07-09T14:14:33.901-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.902-0400 m31202| 2015-07-09T14:14:33.902-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.903-0400 m31202| 2015-07-09T14:14:33.902-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.907-0400 m31200| 2015-07-09T14:14:33.906-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.907-0400 m31200| 2015-07-09T14:14:33.906-0400 I COMMAND [conn137] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo2: 1.0 }, name: "foo2_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 130783 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.908-0400 m31101| 2015-07-09T14:14:33.908-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.909-0400 m31101| 2015-07-09T14:14:33.908-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.919-0400 m31202| 2015-07-09T14:14:33.919-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.919-0400 m31201| 2015-07-09T14:14:33.919-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.920-0400 m31102| 2015-07-09T14:14:33.919-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.920-0400 m31100| 2015-07-09T14:14:33.919-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.920-0400 m31100| 2015-07-09T14:14:33.919-0400 I INDEX [conn73] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.923-0400 m31200| 2015-07-09T14:14:33.923-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:33.923-0400 m31200| 2015-07-09T14:14:33.923-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.208-0400 m31101| 2015-07-09T14:14:33.925-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.209-0400 m31102| 2015-07-09T14:14:33.931-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.209-0400 m31102| 2015-07-09T14:14:33.931-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.210-0400 m31100| 2015-07-09T14:14:33.931-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.211-0400 m31100| 2015-07-09T14:14:33.933-0400 I COMMAND [conn73] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148005 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.212-0400 m31202| 2015-07-09T14:14:33.937-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.213-0400 m31202| 2015-07-09T14:14:33.937-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.213-0400 m31201| 2015-07-09T14:14:33.937-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.213-0400 m31201| 2015-07-09T14:14:33.937-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.214-0400 m31200| 2015-07-09T14:14:33.939-0400 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.214-0400 m31200| 2015-07-09T14:14:33.940-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 156029 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 190ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.214-0400 m31101| 2015-07-09T14:14:33.944-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.215-0400 m31101| 2015-07-09T14:14:33.944-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.215-0400 m31202| 2015-07-09T14:14:33.950-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.216-0400 m31201| 2015-07-09T14:14:33.950-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.216-0400 m31100| 2015-07-09T14:14:33.950-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.216-0400 m31100| 2015-07-09T14:14:33.950-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.217-0400 m31102| 2015-07-09T14:14:33.953-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.217-0400 m31200| 2015-07-09T14:14:33.956-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.218-0400 m31200| 2015-07-09T14:14:33.956-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.218-0400 m31101| 2015-07-09T14:14:33.959-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.219-0400 m31100| 2015-07-09T14:14:33.964-0400 I INDEX [conn57] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.220-0400 m31100| 2015-07-09T14:14:33.965-0400 I COMMAND [conn57] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 179625 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 211ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.221-0400 m31201| 2015-07-09T14:14:33.971-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.222-0400 m31201| 2015-07-09T14:14:33.972-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.222-0400 m31102| 2015-07-09T14:14:33.973-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.223-0400 m31102| 2015-07-09T14:14:33.973-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.223-0400 m31202| 2015-07-09T14:14:33.973-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.223-0400 m31202| 2015-07-09T14:14:33.973-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.224-0400 m31200| 2015-07-09T14:14:33.976-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.224-0400 m31200| 2015-07-09T14:14:33.977-0400 I COMMAND [conn19] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 187015 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 223ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.225-0400 m31201| 2015-07-09T14:14:33.981-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.225-0400 m31101| 2015-07-09T14:14:33.981-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.225-0400 m31101| 2015-07-09T14:14:33.981-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.226-0400 m31100| 2015-07-09T14:14:33.981-0400 I INDEX [conn172] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.226-0400 m31100| 2015-07-09T14:14:33.981-0400 I INDEX [conn172] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.228-0400 m31202| 2015-07-09T14:14:33.987-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.228-0400 m31102| 2015-07-09T14:14:33.992-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.228-0400 m31101| 2015-07-09T14:14:34.008-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.229-0400 m31100| 2015-07-09T14:14:34.008-0400 I INDEX [conn172] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.230-0400 m31100| 2015-07-09T14:14:34.009-0400 I COMMAND [conn172] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 201619 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 246ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.231-0400 m31100| 2015-07-09T14:14:34.010-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.232-0400 m31100| 2015-07-09T14:14:34.012-0400 I COMMAND [conn32] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo9: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 232532 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 234ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.232-0400 m31100| 2015-07-09T14:14:34.012-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.232-0400 m31200| 2015-07-09T14:14:34.012-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.232-0400 m31200| 2015-07-09T14:14:34.013-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.233-0400 m31201| 2015-07-09T14:14:34.019-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, 
key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.233-0400 m31201| 2015-07-09T14:14:34.019-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.233-0400 m31202| 2015-07-09T14:14:34.019-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.233-0400 m31202| 2015-07-09T14:14:34.019-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.234-0400 m31100| 2015-07-09T14:14:34.019-0400 I COMMAND [conn15] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo6: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 211374 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 217ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.236-0400 m31100| 2015-07-09T14:14:34.019-0400 I COMMAND [conn171] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version epoch mismatch detected for db49.coll49, the collection may have been dropped and recreated ( ns : db49.coll49, received : 0|0||000000000000000000000000, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:391 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 163884 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 164ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.236-0400 m30998| 2015-07-09T14:14:34.020-0400 I SHARDING [conn303] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.237-0400 m30998| 2015-07-09T14:14:34.021-0400 I SHARDING [conn303] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.239-0400 m31100| 2015-07-09T14:14:34.019-0400 I COMMAND [conn49] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 216899 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 217ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.239-0400 m31102| 2015-07-09T14:14:34.022-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.239-0400 m31102| 2015-07-09T14:14:34.022-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.239-0400 m31101| 2015-07-09T14:14:34.024-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } 
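
Note: the listIndexes failure above (code 13388, "shard version not ok: version epoch mismatch") is the expected stale-shard-version handshake, not a test failure: conn171's cached version (0|0||000000000000000000000000) no longer matches the collection epoch (2|3||559eba05ca4787b9985d1e00), so mongos (m30998) discards the pooled connection and retries the command, as the "retrying command" entry shows. mongos normally hides this from clients; a test that talks to a shard directly might guard against it with something like the following sketch (helper name and retry count are illustrative):

    // Hypothetical helper: retry a command that can transiently fail with a
    // stale-shard-version error while indexes/chunks are being reshuffled.
    function runWithRetry(targetDB, cmd, attempts) {
        for (var i = 0; i < attempts; i++) {
            var res = targetDB.runCommand(cmd);
            if (res.ok) {
                return res;
            }
            // 13388 is the stale-config code seen in the log entry above.
            if (res.code !== 13388) {
                throw Error("command failed: " + tojson(res));
            }
        }
        throw Error("still stale after " + attempts + " attempts");
    }
    runWithRetry(db.getSiblingDB("db49"), { listIndexes: "coll49", cursor: { batchSize: 2 } }, 5);
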
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.240-0400 m31101| 2015-07-09T14:14:34.024-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.240-0400 m31100| 2015-07-09T14:14:34.020-0400 I COMMAND [conn58] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 119153 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.241-0400 m31100| 2015-07-09T14:14:34.021-0400 I NETWORK [conn171] end connection 127.0.0.1:63688 (99 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.241-0400 m31100| 2015-07-09T14:14:34.022-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.242-0400 m31100| 2015-07-09T14:14:34.026-0400 I COMMAND [conn34] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo0: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 190988 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 195ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.242-0400 m31100| 2015-07-09T14:14:34.026-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.243-0400 m31200| 2015-07-09T14:14:34.027-0400 I INDEX [conn30] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.244-0400 m31200| 2015-07-09T14:14:34.028-0400 I COMMAND [conn30] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo4: 1.0 }, name: "foo4_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 214099 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 264ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.244-0400 m31200| 2015-07-09T14:14:34.028-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.245-0400 m31200| 2015-07-09T14:14:34.033-0400 I COMMAND [conn47] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo9: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 250673 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 256ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.245-0400 m31200| 2015-07-09T14:14:34.034-0400 I COMMAND [conn64] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.245-0400 m30999| 2015-07-09T14:14:34.034-0400 I NETWORK [conn305] end connection 127.0.0.1:63678 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.246-0400 m31100| 2015-07-09T14:14:34.034-0400 I COMMAND [conn37] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 118341 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.246-0400 m31100| 2015-07-09T14:14:34.035-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.246-0400 m31102| 2015-07-09T14:14:34.036-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.247-0400 m31201| 2015-07-09T14:14:34.040-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.248-0400 m31100| 2015-07-09T14:14:34.041-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.248-0400 m31101| 2015-07-09T14:14:34.045-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.248-0400 m31200| 2015-07-09T14:14:34.045-0400 I COMMAND [conn64] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo6: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 232497 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 243ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.248-0400 m31200| 2015-07-09T14:14:34.045-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.249-0400 m31200| 2015-07-09T14:14:34.053-0400 I COMMAND [conn63] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo0: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 214157 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 221ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.250-0400 m31100| 2015-07-09T14:14:34.053-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.250-0400 m31202| 2015-07-09T14:14:34.053-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.250-0400 m31102| 2015-07-09T14:14:34.054-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.251-0400 m31102| 2015-07-09T14:14:34.054-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.251-0400 m31200| 2015-07-09T14:14:34.054-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.252-0400 m31200| 2015-07-09T14:14:34.055-0400 I COMMAND [conn48] command db49.coll49 command: dropIndexes { deleteIndexes: "coll49", index: { foo2: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 145631 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.252-0400 m31200| 2015-07-09T14:14:34.056-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.252-0400 m31100| 2015-07-09T14:14:34.058-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.252-0400 m31201| 2015-07-09T14:14:34.058-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.252-0400 m31201| 2015-07-09T14:14:34.058-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.253-0400 m31200| 2015-07-09T14:14:34.060-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.254-0400 m31202| 2015-07-09T14:14:34.066-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.254-0400 m31202| 2015-07-09T14:14:34.066-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.255-0400 m31200| 2015-07-09T14:14:34.066-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.255-0400 m31101| 2015-07-09T14:14:34.066-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo4: 1.0 }, name: "foo4_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.255-0400 m31101| 2015-07-09T14:14:34.066-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.257-0400 m31100| 2015-07-09T14:14:34.066-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.258-0400 m31200| 2015-07-09T14:14:34.067-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.258-0400 m31201| 2015-07-09T14:14:34.069-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.259-0400 m31201| 2015-07-09T14:14:34.071-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.261-0400 m31200| 2015-07-09T14:14:34.071-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.261-0400 m31102| 2015-07-09T14:14:34.072-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.262-0400 m31102| 2015-07-09T14:14:34.073-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.262-0400 m31202| 2015-07-09T14:14:34.074-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.262-0400 m31102| 2015-07-09T14:14:34.075-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.262-0400 m31202| 2015-07-09T14:14:34.075-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.262-0400 m31201| 2015-07-09T14:14:34.076-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.262-0400 m31101| 2015-07-09T14:14:34.077-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.263-0400 m31201| 2015-07-09T14:14:34.077-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.263-0400 m31202| 2015-07-09T14:14:34.078-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.263-0400 m31102| 2015-07-09T14:14:34.078-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.263-0400 m31101| 2015-07-09T14:14:34.078-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.264-0400 m31202| 2015-07-09T14:14:34.080-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.264-0400 m31101| 2015-07-09T14:14:34.080-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.264-0400 m31102| 2015-07-09T14:14:34.080-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.264-0400 m31201| 2015-07-09T14:14:34.081-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.265-0400 m31102| 2015-07-09T14:14:34.083-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.265-0400 m31202| 2015-07-09T14:14:34.083-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.265-0400 m31101| 2015-07-09T14:14:34.083-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.265-0400 m31201| 2015-07-09T14:14:34.084-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.265-0400 m31202| 2015-07-09T14:14:34.084-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.265-0400 m31102| 2015-07-09T14:14:34.085-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.266-0400 m31202| 2015-07-09T14:14:34.085-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.266-0400 m31101| 2015-07-09T14:14:34.086-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.266-0400 m31201| 2015-07-09T14:14:34.086-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.266-0400 m31102| 2015-07-09T14:14:34.086-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.266-0400 m31101| 2015-07-09T14:14:34.087-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.266-0400 m31102| 2015-07-09T14:14:34.088-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.267-0400 m31201| 2015-07-09T14:14:34.088-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:34.267-0400 m31102| 2015-07-09T14:14:34.088-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.268-0400 m31202| 2015-07-09T14:14:34.088-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.268-0400 m31101| 2015-07-09T14:14:34.089-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.268-0400 m31202| 2015-07-09T14:14:34.090-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.268-0400 m31201| 2015-07-09T14:14:34.090-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.269-0400 m31101| 2015-07-09T14:14:34.091-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.269-0400 m31202| 2015-07-09T14:14:34.091-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.269-0400 m31101| 2015-07-09T14:14:34.092-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.270-0400 m31201| 2015-07-09T14:14:34.092-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.270-0400 m31101| 2015-07-09T14:14:34.093-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.270-0400 m31100| 2015-07-09T14:14:34.142-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.271-0400 m31100| 2015-07-09T14:14:34.142-0400 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.271-0400 m31200| 2015-07-09T14:14:34.150-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.272-0400 m31200| 2015-07-09T14:14:34.150-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.272-0400 m31100| 2015-07-09T14:14:34.152-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.272-0400 m31200| 2015-07-09T14:14:34.161-0400 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.273-0400 m31100| 2015-07-09T14:14:34.161-0400 I INDEX [conn172] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.274-0400 m31100| 2015-07-09T14:14:34.161-0400 I INDEX [conn172] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.274-0400 m31101| 2015-07-09T14:14:34.164-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.274-0400 m31101| 2015-07-09T14:14:34.164-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.275-0400 m31102| 2015-07-09T14:14:34.172-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.275-0400 m31102| 2015-07-09T14:14:34.172-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.275-0400 m31101| 2015-07-09T14:14:34.186-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.276-0400 m31200| 2015-07-09T14:14:34.186-0400 I INDEX [conn30] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.276-0400 m31200| 2015-07-09T14:14:34.186-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.277-0400 m31100| 2015-07-09T14:14:34.188-0400 I INDEX [conn172] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.277-0400 m31201| 2015-07-09T14:14:34.189-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.277-0400 m31201| 2015-07-09T14:14:34.189-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.277-0400 m31202| 2015-07-09T14:14:34.189-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.278-0400 m31202| 2015-07-09T14:14:34.189-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.278-0400 m31102| 2015-07-09T14:14:34.199-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.278-0400 m31201| 2015-07-09T14:14:34.200-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.279-0400 m31202| 2015-07-09T14:14:34.200-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.279-0400 m31200| 2015-07-09T14:14:34.202-0400 I INDEX [conn30] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.279-0400 m31100| 2015-07-09T14:14:34.206-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.280-0400 m31100| 2015-07-09T14:14:34.206-0400 I INDEX [conn57] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.280-0400 m31102| 2015-07-09T14:14:34.211-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.280-0400 m31102| 2015-07-09T14:14:34.211-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.281-0400 m31101| 2015-07-09T14:14:34.211-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.281-0400 m31101| 2015-07-09T14:14:34.211-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.281-0400 m31202| 2015-07-09T14:14:34.217-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.283-0400 m31202| 2015-07-09T14:14:34.217-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.284-0400 m31201| 2015-07-09T14:14:34.217-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo6: 1.0 }, name: "foo6_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.284-0400 m31201| 2015-07-09T14:14:34.217-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.284-0400 m31100| 2015-07-09T14:14:34.218-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.284-0400 m31200| 2015-07-09T14:14:34.217-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.284-0400 m31200| 2015-07-09T14:14:34.217-0400 I INDEX [conn19] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.285-0400 m31101| 2015-07-09T14:14:34.226-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.285-0400 m31200| 2015-07-09T14:14:34.226-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.286-0400 m31102| 2015-07-09T14:14:34.228-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.286-0400 m31201| 2015-07-09T14:14:34.236-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.287-0400 m31202| 2015-07-09T14:14:34.236-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.287-0400 m31200| 2015-07-09T14:14:34.240-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.287-0400 m31200| 2015-07-09T14:14:34.240-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.288-0400 m31101| 2015-07-09T14:14:34.240-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.289-0400 m31100| 2015-07-09T14:14:34.240-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.289-0400 m31101| 2015-07-09T14:14:34.241-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.290-0400 m31100| 2015-07-09T14:14:34.241-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.290-0400 m31201| 2015-07-09T14:14:34.249-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.291-0400 m31201| 2015-07-09T14:14:34.249-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.291-0400 m31102| 2015-07-09T14:14:34.249-0400 I INDEX [repl writer worker 6] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.291-0400 m31102| 2015-07-09T14:14:34.249-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.291-0400 m31202| 2015-07-09T14:14:34.252-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.292-0400 m31202| 2015-07-09T14:14:34.252-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.292-0400 m31101| 2015-07-09T14:14:34.253-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.292-0400 m31200| 2015-07-09T14:14:34.253-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.293-0400 m31200| 2015-07-09T14:14:34.254-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.293-0400 m31102| 2015-07-09T14:14:34.256-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.293-0400 m31201| 2015-07-09T14:14:34.256-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.294-0400 m31100| 2015-07-09T14:14:34.255-0400 I INDEX [conn45] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.294-0400 m31100| 2015-07-09T14:14:34.256-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.294-0400 m31202| 2015-07-09T14:14:34.259-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.294-0400 m31200| 2015-07-09T14:14:34.266-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.294-0400 m31200| 2015-07-09T14:14:34.267-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.295-0400 m31100| 2015-07-09T14:14:34.267-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.295-0400 m31100| 2015-07-09T14:14:34.267-0400 I INDEX [conn58] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.295-0400 m31102| 2015-07-09T14:14:34.272-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.295-0400 m31102| 2015-07-09T14:14:34.273-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.295-0400 m31201| 2015-07-09T14:14:34.273-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.295-0400 m31201| 2015-07-09T14:14:34.273-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.295-0400 m31202| 2015-07-09T14:14:34.275-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.296-0400 m31202| 2015-07-09T14:14:34.275-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.296-0400 m31100| 2015-07-09T14:14:34.281-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.296-0400 m31101| 2015-07-09T14:14:34.281-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.297-0400 m31101| 2015-07-09T14:14:34.281-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.297-0400 m31102| 2015-07-09T14:14:34.282-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.297-0400 m31200| 2015-07-09T14:14:34.282-0400 I INDEX [conn52] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.298-0400 m31200| 2015-07-09T14:14:34.283-0400 I COMMAND [conn52] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 90539 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.298-0400 m31102| 2015-07-09T14:14:34.284-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.298-0400 m31201| 2015-07-09T14:14:34.285-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31100| 2015-07-09T14:14:34.284-0400 I COMMAND [conn58] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 94734 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31201| 2015-07-09T14:14:34.286-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31101| 2015-07-09T14:14:34.292-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31100| 2015-07-09T14:14:34.292-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31100| 2015-07-09T14:14:34.292-0400 I INDEX [conn47] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31101| 2015-07-09T14:14:34.293-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31202| 2015-07-09T14:14:34.294-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.299-0400 m31202| 2015-07-09T14:14:34.296-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.303-0400 m31200| 2015-07-09T14:14:34.303-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.303-0400 m31200| 2015-07-09T14:14:34.303-0400 I INDEX [conn81] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.309-0400 m31201| 2015-07-09T14:14:34.309-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.310-0400 m31201| 2015-07-09T14:14:34.309-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.310-0400 m31102| 2015-07-09T14:14:34.309-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.311-0400 m31102| 2015-07-09T14:14:34.309-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.315-0400 m31100| 2015-07-09T14:14:34.315-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.317-0400 m31100| 2015-07-09T14:14:34.315-0400 I COMMAND [conn47] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 116448 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.318-0400 m31202| 2015-07-09T14:14:34.318-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.318-0400 m31101| 2015-07-09T14:14:34.318-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.318-0400 m31202| 2015-07-09T14:14:34.318-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.319-0400 m31101| 2015-07-09T14:14:34.318-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.322-0400 m31200| 2015-07-09T14:14:34.322-0400 I INDEX [conn81] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.323-0400 m31200| 2015-07-09T14:14:34.322-0400 I COMMAND [conn81] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo8: 1.0 }, name: "foo8_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 115805 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 154ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.331-0400 m31202| 2015-07-09T14:14:34.331-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.332-0400 m31100| 2015-07-09T14:14:34.331-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.332-0400 m31100| 2015-07-09T14:14:34.331-0400 I INDEX [conn73] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.335-0400 m31102| 2015-07-09T14:14:34.334-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.336-0400 m31201| 2015-07-09T14:14:34.334-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.337-0400 m31101| 2015-07-09T14:14:34.337-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.347-0400 m31100| 2015-07-09T14:14:34.346-0400 I INDEX [conn73] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.348-0400 m31202| 2015-07-09T14:14:34.346-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.348-0400 m31202| 2015-07-09T14:14:34.346-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.348-0400 m31102| 2015-07-09T14:14:34.347-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.349-0400 m31102| 2015-07-09T14:14:34.347-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.349-0400 m31200| 2015-07-09T14:14:34.346-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.349-0400 m31200| 2015-07-09T14:14:34.346-0400 I INDEX [conn80] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.349-0400 m31201| 2015-07-09T14:14:34.347-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.350-0400 m31201| 2015-07-09T14:14:34.347-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.351-0400 m31100| 2015-07-09T14:14:34.348-0400 I COMMAND [conn73] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 145131 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 177ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.352-0400 m31202| 2015-07-09T14:14:34.351-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.352-0400 m31102| 2015-07-09T14:14:34.351-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.353-0400 m31101| 2015-07-09T14:14:34.351-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.353-0400 m31101| 2015-07-09T14:14:34.351-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.357-0400 m31100| 2015-07-09T14:14:34.357-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.358-0400 m31100| 2015-07-09T14:14:34.357-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.361-0400 m31101| 2015-07-09T14:14:34.360-0400 I INDEX [repl writer worker 4] build index done. 
scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.362-0400 m31100| 2015-07-09T14:14:34.361-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63691 #173 (100 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.365-0400 m31201| 2015-07-09T14:14:34.364-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.365-0400 m31102| 2015-07-09T14:14:34.364-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.365-0400 m31102| 2015-07-09T14:14:34.364-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.366-0400 m31200| 2015-07-09T14:14:34.366-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.368-0400 m31200| 2015-07-09T14:14:34.367-0400 I COMMAND [conn80] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo3: 1.0 }, name: "foo3_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 151863 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 196ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.370-0400 m31100| 2015-07-09T14:14:34.369-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.374-0400 m31100| 2015-07-09T14:14:34.371-0400 I COMMAND [conn49] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 174294 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 196ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.374-0400 m31102| 2015-07-09T14:14:34.372-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.375-0400 m31101| 2015-07-09T14:14:34.374-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.375-0400 m31101| 2015-07-09T14:14:34.374-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.378-0400 m31202| 2015-07-09T14:14:34.377-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.378-0400 m31202| 2015-07-09T14:14:34.377-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.379-0400 m31201| 2015-07-09T14:14:34.378-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.379-0400 m31201| 2015-07-09T14:14:34.378-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.382-0400 m31200| 2015-07-09T14:14:34.381-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.382-0400 m31200| 2015-07-09T14:14:34.382-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.388-0400 m31100| 2015-07-09T14:14:34.388-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.388-0400 m31100| 2015-07-09T14:14:34.388-0400 I INDEX [conn50] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.392-0400 m31102| 2015-07-09T14:14:34.392-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.392-0400 m31102| 2015-07-09T14:14:34.392-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.393-0400 m31101| 2015-07-09T14:14:34.393-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.403-0400 m31201| 2015-07-09T14:14:34.402-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.404-0400 m31200| 2015-07-09T14:14:34.401-0400 I INDEX [conn38] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.404-0400 m31200| 2015-07-09T14:14:34.402-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 193617 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 228ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.405-0400 m31202| 2015-07-09T14:14:34.403-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.405-0400 m31100| 2015-07-09T14:14:34.405-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.412-0400 m31100| 2015-07-09T14:14:34.406-0400 I COMMAND [conn50] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 194866 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 229ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.413-0400 m31100| 2015-07-09T14:14:34.406-0400 I COMMAND [conn57] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 177905 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.414-0400 m31100| 2015-07-09T14:14:34.406-0400 I COMMAND [conn172] command db49.$cmd command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: [db49.coll49] shard version not ok: version epoch mismatch detected for db49.coll49, the collection may have been dropped and recreated ( ns : db49.coll49, received : 0|0||000000000000000000000000, wanted : 2|3||559eba05ca4787b9985d1e00, send ) code:13388 numYields:0 reslen:391 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 202003 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 202ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.414-0400 m31100| 2015-07-09T14:14:34.407-0400 I NETWORK [conn172] end connection 127.0.0.1:63689 (99 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.415-0400 m30999| 2015-07-09T14:14:34.407-0400 I SHARDING [conn303] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.415-0400 m30999| 2015-07-09T14:14:34.407-0400 I SHARDING [conn303] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:34.417-0400 m31100| 2015-07-09T14:14:34.410-0400 I COMMAND [conn45] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 148590 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.417-0400 m31102| 2015-07-09T14:14:34.411-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.417-0400 m31200| 2015-07-09T14:14:34.412-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.417-0400 m31101| 2015-07-09T14:14:34.412-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.417-0400 m31200| 2015-07-09T14:14:34.412-0400 I INDEX [conn60] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.417-0400 m31101| 2015-07-09T14:14:34.412-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.418-0400 m31100| 2015-07-09T14:14:34.412-0400 I COMMAND [conn58] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 120052 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.419-0400 m31202| 2015-07-09T14:14:34.415-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.419-0400 m31202| 2015-07-09T14:14:34.415-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.421-0400 m31201| 2015-07-09T14:14:34.421-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.422-0400 m31201| 2015-07-09T14:14:34.421-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.426-0400 m31202| 2015-07-09T14:14:34.426-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.427-0400 m31200| 2015-07-09T14:14:34.426-0400 I INDEX [conn60] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.427-0400 m31100| 2015-07-09T14:14:34.426-0400 I INDEX [conn173] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.428-0400 m31100| 2015-07-09T14:14:34.426-0400 I INDEX [conn173] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.429-0400 m31200| 2015-07-09T14:14:34.426-0400 I COMMAND [conn60] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 225725 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 249ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.432-0400 m31101| 2015-07-09T14:14:34.432-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.441-0400 m31201| 2015-07-09T14:14:34.439-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.442-0400 m31102| 2015-07-09T14:14:34.440-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.443-0400 m31100| 2015-07-09T14:14:34.440-0400 I INDEX [conn173] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.443-0400 m31102| 2015-07-09T14:14:34.440-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.444-0400 m31100| 2015-07-09T14:14:34.443-0400 I COMMAND [conn15] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.445-0400 m31100| 2015-07-09T14:14:34.444-0400 I COMMAND [conn35] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.448-0400 m31200| 2015-07-09T14:14:34.444-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.448-0400 m31200| 2015-07-09T14:14:34.444-0400 I INDEX [conn52] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.448-0400 m30999| 2015-07-09T14:14:34.446-0400 I NETWORK [conn303] end connection 127.0.0.1:63675 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.448-0400 m31100| 2015-07-09T14:14:34.448-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.449-0400 m31202| 2015-07-09T14:14:34.448-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.449-0400 m31202| 2015-07-09T14:14:34.448-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.449-0400 m31102| 2015-07-09T14:14:34.448-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.452-0400 m31100| 2015-07-09T14:14:34.451-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.455-0400 m31100| 2015-07-09T14:14:34.454-0400 I COMMAND [conn39] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.455-0400 m31201| 2015-07-09T14:14:34.454-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.456-0400 m31201| 2015-07-09T14:14:34.454-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.456-0400 m31101| 2015-07-09T14:14:34.454-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.456-0400 m31101| 2015-07-09T14:14:34.454-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.457-0400 m31100| 2015-07-09T14:14:34.455-0400 I COMMAND [conn32] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.457-0400 m31100| 2015-07-09T14:14:34.456-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.459-0400 m31200| 2015-07-09T14:14:34.458-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.460-0400 m31200| 2015-07-09T14:14:34.459-0400 I COMMAND [conn84] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.461-0400 m30998| 2015-07-09T14:14:34.460-0400 I SHARDING [conn302] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.462-0400 m31200| 2015-07-09T14:14:34.460-0400 I COMMAND [conn65] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.464-0400 m31102| 2015-07-09T14:14:34.460-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.464-0400 m30998| 2015-07-09T14:14:34.460-0400 I SHARDING [conn302] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.465-0400 m31102| 2015-07-09T14:14:34.460-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.466-0400 m31100| 2015-07-09T14:14:34.462-0400 I NETWORK [conn173] end connection 127.0.0.1:63691 (98 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.466-0400 m31200| 2015-07-09T14:14:34.463-0400 I COMMAND [conn48] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.467-0400 m31100| 2015-07-09T14:14:34.463-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.467-0400 m31202| 2015-07-09T14:14:34.464-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.467-0400 m31101| 2015-07-09T14:14:34.465-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.469-0400 m31201| 2015-07-09T14:14:34.468-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.469-0400 m31102| 2015-07-09T14:14:34.469-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.470-0400 m31200| 2015-07-09T14:14:34.470-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.473-0400 m31202| 2015-07-09T14:14:34.472-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.473-0400 m31202| 2015-07-09T14:14:34.472-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.473-0400 m31102| 2015-07-09T14:14:34.472-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.475-0400 m31200| 2015-07-09T14:14:34.475-0400 I COMMAND [conn47] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.476-0400 m31101| 2015-07-09T14:14:34.475-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.476-0400 m31101| 2015-07-09T14:14:34.475-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.476-0400 m31200| 2015-07-09T14:14:34.476-0400 I COMMAND [conn62] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.477-0400 m31102| 2015-07-09T14:14:34.476-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.478-0400 m31201| 2015-07-09T14:14:34.477-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.478-0400 m31200| 2015-07-09T14:14:34.477-0400 I COMMAND [conn34] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.478-0400 m31201| 2015-07-09T14:14:34.477-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.479-0400 m31102| 2015-07-09T14:14:34.477-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.481-0400 m31200| 2015-07-09T14:14:34.480-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.481-0400 m31202| 2015-07-09T14:14:34.480-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.481-0400 m31102| 2015-07-09T14:14:34.480-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.481-0400 m31202| 2015-07-09T14:14:34.481-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.483-0400 m31202| 2015-07-09T14:14:34.483-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.484-0400 m31202| 2015-07-09T14:14:34.484-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.484-0400 m31101| 2015-07-09T14:14:34.484-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.485-0400 m31201| 2015-07-09T14:14:34.485-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.485-0400 m31102| 2015-07-09T14:14:34.485-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.486-0400 m31101| 2015-07-09T14:14:34.485-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.487-0400 m31101| 2015-07-09T14:14:34.487-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.488-0400 m31202| 2015-07-09T14:14:34.487-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.488-0400 m31201| 2015-07-09T14:14:34.485-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.488-0400 m31102| 2015-07-09T14:14:34.488-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.488-0400 m31202| 2015-07-09T14:14:34.488-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.488-0400 m31101| 2015-07-09T14:14:34.488-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.489-0400 m31102| 2015-07-09T14:14:34.489-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.489-0400 m31202| 2015-07-09T14:14:34.489-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.490-0400 m31101| 2015-07-09T14:14:34.489-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.490-0400 m31101| 2015-07-09T14:14:34.490-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.491-0400 m31201| 2015-07-09T14:14:34.490-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.491-0400 m31202| 2015-07-09T14:14:34.490-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.491-0400 m31201| 2015-07-09T14:14:34.491-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
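The interleaved CMD: dropIndexes and index-build entries above are concurrent workload threads racing index creation and removal on db49.coll49 through the two mongos routers (m30998, m30999); each secondary (m31101, m31102, m31201, m31202) then replays the same operations via its repl writer workers. A minimal mongo shell sketch of the command pair driving this churn (the collection and key names are taken from the log; the workload's actual source is not reproduced here):

    var testDB = db.getSiblingDB("db49");
    // build a single-field ascending index, then immediately tear it down
    assert.commandWorked(testDB.runCommand(
        { createIndexes: "coll49", indexes: [ { key: { foo5: 1 }, name: "foo5_1" } ] }));
    testDB.runCommand({ dropIndexes: "coll49", index: "foo5_1" });  // may race with another thread's drop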
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.492-0400 m31202| 2015-07-09T14:14:34.491-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.492-0400 m31101| 2015-07-09T14:14:34.491-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.492-0400 m31101| 2015-07-09T14:14:34.492-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.492-0400 m31201| 2015-07-09T14:14:34.492-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.493-0400 m31101| 2015-07-09T14:14:34.493-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.494-0400 m31201| 2015-07-09T14:14:34.494-0400 I COMMAND [repl writer worker 4] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.495-0400 m31201| 2015-07-09T14:14:34.494-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.496-0400 m31201| 2015-07-09T14:14:34.495-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.496-0400 m31201| 2015-07-09T14:14:34.496-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.569-0400 m31100| 2015-07-09T14:14:34.568-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.570-0400 m31100| 2015-07-09T14:14:34.568-0400 I INDEX [conn57] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.570-0400 m30999| 2015-07-09T14:14:34.569-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:34.566-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.571-0400 m31200| 2015-07-09T14:14:34.571-0400 I INDEX [conn81] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.571-0400 m31200| 2015-07-09T14:14:34.571-0400 I INDEX [conn81] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.576-0400 m31100| 2015-07-09T14:14:34.576-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.577-0400 m31102| 2015-07-09T14:14:34.577-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.578-0400 m31200| 2015-07-09T14:14:34.577-0400 I INDEX [conn81] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.584-0400 m31100| 2015-07-09T14:14:34.584-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63692 #174 (99 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.587-0400 m31100| 2015-07-09T14:14:34.587-0400 I INDEX [conn50] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.588-0400 m31100| 2015-07-09T14:14:34.587-0400 I INDEX [conn50] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.592-0400 m31200| 2015-07-09T14:14:34.592-0400 I INDEX [conn52] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.593-0400 m31200| 2015-07-09T14:14:34.592-0400 I INDEX [conn52] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.594-0400 m31202| 2015-07-09T14:14:34.592-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.594-0400 m31101| 2015-07-09T14:14:34.592-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.594-0400 m31101| 2015-07-09T14:14:34.592-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.595-0400 m31202| 2015-07-09T14:14:34.592-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.596-0400 m31102| 2015-07-09T14:14:34.595-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.596-0400 m31102| 2015-07-09T14:14:34.595-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.598-0400 m31201| 2015-07-09T14:14:34.598-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo0: 1.0 }, name: "foo0_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.599-0400 m31201| 2015-07-09T14:14:34.598-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.604-0400 m31200| 2015-07-09T14:14:34.603-0400 I INDEX [conn52] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.604-0400 m31100| 2015-07-09T14:14:34.603-0400 I INDEX [conn50] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.605-0400 m31101| 2015-07-09T14:14:34.603-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.607-0400 m31202| 2015-07-09T14:14:34.605-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.612-0400 m31201| 2015-07-09T14:14:34.611-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.612-0400 m31102| 2015-07-09T14:14:34.611-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.615-0400 m31200| 2015-07-09T14:14:34.614-0400 I INDEX [conn137] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.615-0400 m31200| 2015-07-09T14:14:34.614-0400 I INDEX [conn137] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.615-0400 m30998| 2015-07-09T14:14:34.614-0400 I NETWORK [conn306] end connection 127.0.0.1:63683 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.616-0400 m31100| 2015-07-09T14:14:34.614-0400 I INDEX [conn45] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.616-0400 m31100| 2015-07-09T14:14:34.614-0400 I INDEX [conn45] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.618-0400 m31202| 2015-07-09T14:14:34.617-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.618-0400 m31202| 2015-07-09T14:14:34.618-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.620-0400 m31101| 2015-07-09T14:14:34.619-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.621-0400 m31101| 2015-07-09T14:14:34.619-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.624-0400 m31102| 2015-07-09T14:14:34.623-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.624-0400 m31102| 2015-07-09T14:14:34.623-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.626-0400 m31201| 2015-07-09T14:14:34.625-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo3: 1.0 }, name: "foo3_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.626-0400 m31201| 2015-07-09T14:14:34.625-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.631-0400 m31100| 2015-07-09T14:14:34.630-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.632-0400 m31200| 2015-07-09T14:14:34.631-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.637-0400 m31102| 2015-07-09T14:14:34.635-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.638-0400 m31201| 2015-07-09T14:14:34.635-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.641-0400 m31101| 2015-07-09T14:14:34.634-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.641-0400 m31202| 2015-07-09T14:14:34.635-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.642-0400 m31100| 2015-07-09T14:14:34.640-0400 I INDEX [conn47] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.642-0400 m31100| 2015-07-09T14:14:34.640-0400 I INDEX [conn47] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.657-0400 m31202| 2015-07-09T14:14:34.655-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.657-0400 m31202| 2015-07-09T14:14:34.655-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.657-0400 m31200| 2015-07-09T14:14:34.656-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.658-0400 m31200| 2015-07-09T14:14:34.656-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.658-0400 m31102| 2015-07-09T14:14:34.656-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.658-0400 m31102| 2015-07-09T14:14:34.656-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.665-0400 m31201| 2015-07-09T14:14:34.664-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.666-0400 m31201| 2015-07-09T14:14:34.664-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.666-0400 m31101| 2015-07-09T14:14:34.664-0400 I INDEX [repl writer worker 0] build index on: db49.coll49 properties: { v: 1, key: { foo8: 1.0 }, name: "foo8_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.666-0400 m31101| 2015-07-09T14:14:34.664-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.667-0400 m31100| 2015-07-09T14:14:34.665-0400 I INDEX [conn47] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.667-0400 m31102| 2015-07-09T14:14:34.667-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.676-0400 m31202| 2015-07-09T14:14:34.675-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.682-0400 m31200| 2015-07-09T14:14:34.681-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.684-0400 m31200| 2015-07-09T14:14:34.683-0400 I COMMAND [conn19] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo2: 1.0 }, name: "foo2_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 55686 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.685-0400 m31101| 2015-07-09T14:14:34.684-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.685-0400 m31201| 2015-07-09T14:14:34.683-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.690-0400 m31102| 2015-07-09T14:14:34.689-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.690-0400 m31102| 2015-07-09T14:14:34.690-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.692-0400 m31200| 2015-07-09T14:14:34.692-0400 I INDEX [conn60] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.692-0400 m31200| 2015-07-09T14:14:34.692-0400 I INDEX [conn60] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.698-0400 m31101| 2015-07-09T14:14:34.697-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.699-0400 m31202| 2015-07-09T14:14:34.697-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.699-0400 m31101| 2015-07-09T14:14:34.697-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.699-0400 m31202| 2015-07-09T14:14:34.697-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.699-0400 m31100| 2015-07-09T14:14:34.698-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.699-0400 m31100| 2015-07-09T14:14:34.698-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.703-0400 m31201| 2015-07-09T14:14:34.703-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.703-0400 m31201| 2015-07-09T14:14:34.703-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.704-0400 m31102| 2015-07-09T14:14:34.703-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.707-0400 m31200| 2015-07-09T14:14:34.707-0400 I INDEX [conn60] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.708-0400 m31200| 2015-07-09T14:14:34.707-0400 I COMMAND [conn60] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 104995 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 129ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.713-0400 m31100| 2015-07-09T14:14:34.713-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.715-0400 m31202| 2015-07-09T14:14:34.715-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.717-0400 m31100| 2015-07-09T14:14:34.714-0400 I COMMAND [conn49] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo1: 1.0 }, name: "foo1_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 87439 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 136ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.717-0400 m31101| 2015-07-09T14:14:34.716-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.717-0400 m31201| 2015-07-09T14:14:34.717-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
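The entries above ending in protocol:op_query 129ms and 136ms are the server's slow-operation log: each createIndexes spent the bulk of its elapsed time waiting on the database-exclusive (W) lock behind competing builds and drops, as timeAcquiringMicros records (104995 µs of conn60's 129 ms; 87439 µs of conn49's 136 ms). A sketch of one way to surface such operations without grepping the log, assuming profiling is acceptable in the test environment:

    var testDB = db.getSiblingDB("db49");
    testDB.setProfilingLevel(1, 100);  // profile operations slower than 100 ms
    testDB.system.profile.find().sort({ ts: -1 }).limit(5).pretty();  // most recent slow ops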
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.720-0400 m31100| 2015-07-09T14:14:34.719-0400 I INDEX [conn58] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.720-0400 m31100| 2015-07-09T14:14:34.719-0400 I INDEX [conn58] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.724-0400 m31200| 2015-07-09T14:14:34.723-0400 I INDEX [conn38] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.724-0400 m31200| 2015-07-09T14:14:34.723-0400 I INDEX [conn38] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.731-0400 m31202| 2015-07-09T14:14:34.730-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.731-0400 m31202| 2015-07-09T14:14:34.730-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.732-0400 m31101| 2015-07-09T14:14:34.731-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.733-0400 m31101| 2015-07-09T14:14:34.731-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.733-0400 m31102| 2015-07-09T14:14:34.732-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.733-0400 m31102| 2015-07-09T14:14:34.732-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.741-0400 m31201| 2015-07-09T14:14:34.740-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo1: 1.0 }, name: "foo1_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.741-0400 m31201| 2015-07-09T14:14:34.740-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.742-0400 m31200| 2015-07-09T14:14:34.741-0400 I INDEX [conn38] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.743-0400 m31200| 2015-07-09T14:14:34.742-0400 I COMMAND [conn38] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 127498 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 161ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.744-0400 m31100| 2015-07-09T14:14:34.743-0400 I INDEX [conn58] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.751-0400 m31100| 2015-07-09T14:14:34.744-0400 I COMMAND [conn45] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 110368 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.754-0400 m31100| 2015-07-09T14:14:34.744-0400 I COMMAND [conn58] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo7: 1.0 }, name: "foo7_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 134072 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 164ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.755-0400 m31100| 2015-07-09T14:14:34.744-0400 I COMMAND [conn57] command db49.coll49 command: listIndexes { listIndexes: "coll49", cursor: { batchSize: 2.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:312 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 163769 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 164ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.755-0400 m31101| 2015-07-09T14:14:34.747-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.757-0400 m31202| 2015-07-09T14:14:34.750-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.757-0400 m30999| 2015-07-09T14:14:34.751-0400 I NETWORK [conn306] end connection 127.0.0.1:63680 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.759-0400 m30998| 2015-07-09T14:14:34.750-0400 I NETWORK [conn304] end connection 127.0.0.1:63681 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.760-0400 m31102| 2015-07-09T14:14:34.754-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.761-0400 m31200| 2015-07-09T14:14:34.758-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.761-0400 m31200| 2015-07-09T14:14:34.758-0400 I INDEX [conn80] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.762-0400 m31100| 2015-07-09T14:14:34.758-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.762-0400 m31100| 2015-07-09T14:14:34.758-0400 I INDEX [conn73] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.762-0400 m31102| 2015-07-09T14:14:34.761-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.763-0400 m31102| 2015-07-09T14:14:34.761-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.763-0400 m31201| 2015-07-09T14:14:34.761-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.770-0400 m31202| 2015-07-09T14:14:34.768-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.770-0400 m31200| 2015-07-09T14:14:34.768-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.770-0400 m31202| 2015-07-09T14:14:34.768-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.774-0400 m31200| 2015-07-09T14:14:34.770-0400 I COMMAND [conn80] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 157499 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 185ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.775-0400 m31100| 2015-07-09T14:14:34.770-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.775-0400 m31101| 2015-07-09T14:14:34.771-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.776-0400 m31101| 2015-07-09T14:14:34.771-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.776-0400 m31100| 2015-07-09T14:14:34.773-0400 I COMMAND [conn73] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo5: 1.0 }, name: "foo5_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 162669 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 188ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.777-0400 m31201| 2015-07-09T14:14:34.776-0400 I INDEX [repl writer worker 9] build index on: db49.coll49 properties: { v: 1, key: { foo7: 1.0 }, name: "foo7_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.777-0400 m31201| 2015-07-09T14:14:34.776-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.782-0400 m31202| 2015-07-09T14:14:34.782-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.784-0400 m31102| 2015-07-09T14:14:34.782-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.784-0400 m31200| 2015-07-09T14:14:34.782-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.784-0400 m31200| 2015-07-09T14:14:34.782-0400 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.792-0400 m31101| 2015-07-09T14:14:34.791-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.792-0400 m31100| 2015-07-09T14:14:34.791-0400 I INDEX [conn174] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.792-0400 m31100| 2015-07-09T14:14:34.791-0400 I INDEX [conn174] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.795-0400 m31200| 2015-07-09T14:14:34.794-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.795-0400 m31102| 2015-07-09T14:14:34.794-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.796-0400 m31102| 2015-07-09T14:14:34.794-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.796-0400 m31200| 2015-07-09T14:14:34.795-0400 I COMMAND [conn63] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.796-0400 m31200| 2015-07-09T14:14:34.796-0400 I COMMAND [conn28] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 184767 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 210ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.799-0400 m31201| 2015-07-09T14:14:34.798-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.799-0400 m31101| 2015-07-09T14:14:34.799-0400 I INDEX [repl writer worker 13] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.800-0400 m31101| 2015-07-09T14:14:34.799-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.800-0400 m31100| 2015-07-09T14:14:34.799-0400 I INDEX [conn174] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.800-0400 m31200| 2015-07-09T14:14:34.799-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.802-0400 m31100| 2015-07-09T14:14:34.800-0400 I COMMAND [conn174] command db49.$cmd command: createIndexes { createIndexes: "coll49", indexes: [ { key: { foo9: 1.0 }, name: "foo9_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 188512 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 215ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.802-0400 m31202| 2015-07-09T14:14:34.800-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.802-0400 m31202| 2015-07-09T14:14:34.800-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.805-0400 m31100| 2015-07-09T14:14:34.801-0400 I COMMAND [conn37] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.805-0400 m31100| 2015-07-09T14:14:34.804-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.806-0400 m30998| 2015-07-09T14:14:34.806-0400 I SHARDING [conn302] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.813-0400 m30998| 2015-07-09T14:14:34.806-0400 I SHARDING [conn302] retrying command: { listIndexes: "coll49", cursor: { batchSize: 2.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.813-0400 m31100| 2015-07-09T14:14:34.807-0400 I NETWORK [conn174] end connection 127.0.0.1:63692 (98 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.813-0400 m30999| 2015-07-09T14:14:34.810-0400 I NETWORK [conn304] end connection 127.0.0.1:63676 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.813-0400 m31100| 2015-07-09T14:14:34.810-0400 I COMMAND [conn36] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.813-0400 m30998| 2015-07-09T14:14:34.809-0400 I NETWORK [conn305] end connection 127.0.0.1:63682 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.815-0400 m31101| 2015-07-09T14:14:34.811-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.815-0400 m31201| 2015-07-09T14:14:34.812-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.816-0400 m31201| 2015-07-09T14:14:34.812-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.816-0400 m31200| 2015-07-09T14:14:34.811-0400 I COMMAND [conn85] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.816-0400 m31102| 2015-07-09T14:14:34.815-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.817-0400 m31202| 2015-07-09T14:14:34.816-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.827-0400 m31201| 2015-07-09T14:14:34.823-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.828-0400 m31202| 2015-07-09T14:14:34.826-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.828-0400 m31202| 2015-07-09T14:14:34.826-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.828-0400 m31101| 2015-07-09T14:14:34.826-0400 I INDEX [repl writer worker 8] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.829-0400 m31101| 2015-07-09T14:14:34.826-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.829-0400 m31102| 2015-07-09T14:14:34.828-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.829-0400 m31102| 2015-07-09T14:14:34.828-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.836-0400 m31101| 2015-07-09T14:14:34.836-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.838-0400 m31201| 2015-07-09T14:14:34.837-0400 I INDEX [repl writer worker 3] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.838-0400 m31201| 2015-07-09T14:14:34.837-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.838-0400 m31101| 2015-07-09T14:14:34.837-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.840-0400 m31102| 2015-07-09T14:14:34.839-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.840-0400 m31101| 2015-07-09T14:14:34.840-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.841-0400 m31102| 2015-07-09T14:14:34.841-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.844-0400 m31101| 2015-07-09T14:14:34.843-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.844-0400 m31102| 2015-07-09T14:14:34.843-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.844-0400 m31202| 2015-07-09T14:14:34.844-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
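The m30998 entries in this stretch show the router's side of the same workload: a listIndexes cursor opened with batchSize: 2.0 lands on a shard connection that was invalidated mid-operation, so mongos discards it ("not being returned to the pool") and retries. Issued directly from the shell, the retried command is:

    db.getSiblingDB("db49").runCommand({ listIndexes: "coll49", cursor: { batchSize: 2 } });

The tiny batch size is presumably deliberate: it forces extra getMore round trips, widening the window in which a concurrent dropIndexes can interleave with cursor iteration.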
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.845-0400 m31202| 2015-07-09T14:14:34.845-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.845-0400 m31102| 2015-07-09T14:14:34.845-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.846-0400 m31201| 2015-07-09T14:14:34.846-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.848-0400 m31202| 2015-07-09T14:14:34.846-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.848-0400 m31201| 2015-07-09T14:14:34.847-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.848-0400 m31202| 2015-07-09T14:14:34.847-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.849-0400 m31201| 2015-07-09T14:14:34.848-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.850-0400 m31201| 2015-07-09T14:14:34.849-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.914-0400 m31200| 2015-07-09T14:14:34.914-0400 I INDEX [conn19] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.914-0400 m31200| 2015-07-09T14:14:34.914-0400 I INDEX [conn19] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.915-0400 m31100| 2015-07-09T14:14:34.914-0400 I INDEX [conn57] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.915-0400 m31100| 2015-07-09T14:14:34.914-0400 I INDEX [conn57] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.919-0400 m31200| 2015-07-09T14:14:34.918-0400 I INDEX [conn19] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.920-0400 m31100| 2015-07-09T14:14:34.920-0400 I INDEX [conn57] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.924-0400 m31202| 2015-07-09T14:14:34.923-0400 I INDEX [repl writer worker 10] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.924-0400 m31202| 2015-07-09T14:14:34.923-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.925-0400 m30999| 2015-07-09T14:14:34.925-0400 I NETWORK [conn302] end connection 127.0.0.1:63674 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.931-0400 m31201| 2015-07-09T14:14:34.930-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.931-0400 m31201| 2015-07-09T14:14:34.930-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.935-0400 m31100| 2015-07-09T14:14:34.934-0400 I INDEX [conn49] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.935-0400 m31100| 2015-07-09T14:14:34.934-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.936-0400 m31101| 2015-07-09T14:14:34.934-0400 I INDEX [repl writer worker 2] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.936-0400 m31101| 2015-07-09T14:14:34.934-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.940-0400 m31202| 2015-07-09T14:14:34.939-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.945-0400 m31200| 2015-07-09T14:14:34.944-0400 I INDEX [conn28] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.945-0400 m31200| 2015-07-09T14:14:34.944-0400 I INDEX [conn28] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.950-0400 m31102| 2015-07-09T14:14:34.949-0400 I INDEX [repl writer worker 15] build index on: db49.coll49 properties: { v: 1, key: { foo2: 1.0 }, name: "foo2_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.950-0400 m31102| 2015-07-09T14:14:34.949-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.951-0400 m31201| 2015-07-09T14:14:34.949-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.955-0400 m31101| 2015-07-09T14:14:34.955-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.956-0400 m31100| 2015-07-09T14:14:34.955-0400 I INDEX [conn49] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.957-0400 m31200| 2015-07-09T14:14:34.957-0400 I INDEX [conn28] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.958-0400 m31102| 2015-07-09T14:14:34.957-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.962-0400 m31100| 2015-07-09T14:14:34.962-0400 I INDEX [conn73] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.963-0400 m31100| 2015-07-09T14:14:34.962-0400 I INDEX [conn73] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.964-0400 m31200| 2015-07-09T14:14:34.964-0400 I INDEX [conn80] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.965-0400 m31200| 2015-07-09T14:14:34.964-0400 I INDEX [conn80] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.968-0400 m31102| 2015-07-09T14:14:34.967-0400 I INDEX [repl writer worker 7] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.968-0400 m31102| 2015-07-09T14:14:34.967-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.973-0400 m31101| 2015-07-09T14:14:34.973-0400 I INDEX [repl writer worker 11] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.973-0400 m31101| 2015-07-09T14:14:34.973-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.974-0400 m31202| 2015-07-09T14:14:34.973-0400 I INDEX [repl writer worker 5] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.975-0400 m31202| 2015-07-09T14:14:34.973-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.975-0400 m31201| 2015-07-09T14:14:34.974-0400 I INDEX [repl writer worker 4] build index on: db49.coll49 properties: { v: 1, key: { foo5: 1.0 }, name: "foo5_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.976-0400 m31201| 2015-07-09T14:14:34.974-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.981-0400 m31100| 2015-07-09T14:14:34.980-0400 I INDEX [conn73] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.981-0400 m31200| 2015-07-09T14:14:34.980-0400 I INDEX [conn80] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.988-0400 m31102| 2015-07-09T14:14:34.984-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.988-0400 m31202| 2015-07-09T14:14:34.987-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.988-0400 m30998| 2015-07-09T14:14:34.987-0400 I NETWORK [conn302] end connection 127.0.0.1:63677 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.992-0400 m31201| 2015-07-09T14:14:34.988-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.992-0400 m31101| 2015-07-09T14:14:34.988-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.992-0400 m30998| 2015-07-09T14:14:34.991-0400 I NETWORK [conn303] end connection 127.0.0.1:63679 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.995-0400 m31102| 2015-07-09T14:14:34.995-0400 I INDEX [repl writer worker 12] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.996-0400 m31102| 2015-07-09T14:14:34.995-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.997-0400 m31202| 2015-07-09T14:14:34.996-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.997-0400 m31202| 2015-07-09T14:14:34.996-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.998-0400 m31201| 2015-07-09T14:14:34.997-0400 I INDEX [repl writer worker 14] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:34.998-0400 m31201| 2015-07-09T14:14:34.997-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.001-0400 m31102| 2015-07-09T14:14:35.001-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.002-0400 m31101| 2015-07-09T14:14:35.001-0400 I INDEX [repl writer worker 1] build index on: db49.coll49 properties: { v: 1, key: { foo9: 1.0 }, name: "foo9_1", ns: "db49.coll49" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.002-0400 m31101| 2015-07-09T14:14:35.002-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.005-0400 m31202| 2015-07-09T14:14:35.004-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.005-0400 m31201| 2015-07-09T14:14:35.004-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.007-0400 m31101| 2015-07-09T14:14:35.007-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
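Everything from the first dropIndexes above to this point is the list_indexes workload draining; its completion banner follows. For orientation, workloads under jstests/concurrency export a $config object of states and transition probabilities that the runner drives from many threads at once. A purely illustrative sketch of that shape (the state names, bodies, and probabilities here are invented, not the contents of list_indexes.js):

    var $config = (function() {
        var states = {
            createIndex: function(db, collName) {
                db[collName].ensureIndex({ foo: 1 });
            },
            dropIndex: function(db, collName) {
                db[collName].dropIndex({ foo: 1 });  // a racing thread may already have dropped it
            },
            listIndexes: function(db, collName) {
                var res = db.runCommand({ listIndexes: collName, cursor: { batchSize: 2 } });
                assertAlways.commandWorked(res);
            }
        };
        var transitions = {
            createIndex: { dropIndex: 0.5, listIndexes: 0.5 },
            dropIndex: { createIndex: 1 },
            listIndexes: { createIndex: 1 }
        };
        return { threadCount: 10, iterations: 20, startState: 'createIndex',
                 states: states, transitions: transitions };
    })();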
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.008-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.008-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.008-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.008-0400 jstests/concurrency/fsm_workloads/list_indexes.js: Workload completed in 4557 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.008-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.008-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.008-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.009-0400 m30999| 2015-07-09T14:14:35.008-0400 I COMMAND [conn1] DROP: db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.009-0400 m30999| 2015-07-09T14:14:35.009-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:35.008-0400-559eba0bca4787b9985d1e02", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465675008), what: "dropCollection.start", ns: "db49.coll49", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.066-0400 m30999| 2015-07-09T14:14:35.066-0400 I SHARDING [conn1] distributed lock 'db49.coll49/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0bca4787b9985d1e03
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.067-0400 m31100| 2015-07-09T14:14:35.067-0400 I COMMAND [conn37] CMD: drop db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.077-0400 m31200| 2015-07-09T14:14:35.077-0400 I COMMAND [conn63] CMD: drop db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.077-0400 m31102| 2015-07-09T14:14:35.077-0400 I COMMAND [repl writer worker 13] CMD: drop db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.078-0400 m31101| 2015-07-09T14:14:35.077-0400 I COMMAND [repl writer worker 10] CMD: drop db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.083-0400 m31202| 2015-07-09T14:14:35.083-0400 I COMMAND [repl writer worker 0] CMD: drop db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.083-0400 m31201| 2015-07-09T14:14:35.083-0400 I COMMAND [repl writer worker 12] CMD: drop db49.coll49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.136-0400 m31100| 2015-07-09T14:14:35.136-0400 I SHARDING [conn37] remotely refreshing metadata for db49.coll49 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba05ca4787b9985d1e00, current metadata version is 2|3||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.138-0400 m31100| 2015-07-09T14:14:35.137-0400 W SHARDING [conn37] no chunks found when reloading db49.coll49, previous version was 0|0||559eba05ca4787b9985d1e00, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.138-0400 m31100| 2015-07-09T14:14:35.138-0400 I SHARDING [conn37] dropping metadata for db49.coll49 at shard version 2|3||559eba05ca4787b9985d1e00, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.139-0400 m31200| 2015-07-09T14:14:35.139-0400 I SHARDING [conn63] remotely refreshing metadata for db49.coll49 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba05ca4787b9985d1e00, current metadata version is 2|5||559eba05ca4787b9985d1e00
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.141-0400 m31200| 2015-07-09T14:14:35.140-0400 W SHARDING [conn63] no chunks found when reloading db49.coll49, previous version was 0|0||559eba05ca4787b9985d1e00, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.141-0400 m31200| 2015-07-09T14:14:35.140-0400 I SHARDING [conn63] dropping metadata for db49.coll49 at shard version 2|5||559eba05ca4787b9985d1e00, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.142-0400 m30999| 2015-07-09T14:14:35.141-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:35.141-0400-559eba0bca4787b9985d1e04", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465675141), what: "dropCollection", ns: "db49.coll49", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.196-0400 m30999| 2015-07-09T14:14:35.195-0400 I SHARDING [conn1] distributed lock 'db49.coll49/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.251-0400 m30999| 2015-07-09T14:14:35.251-0400 I COMMAND [conn1] DROP DATABASE: db49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.252-0400 m30999| 2015-07-09T14:14:35.251-0400 I SHARDING [conn1] DBConfig::dropDatabase: db49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.252-0400 m30999| 2015-07-09T14:14:35.251-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:35.251-0400-559eba0bca4787b9985d1e05", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465675251), what: "dropDatabase.start", ns: "db49", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.358-0400 m30999| 2015-07-09T14:14:35.357-0400 I SHARDING [conn1] DBConfig::dropDatabase: db49 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.359-0400 m31100| 2015-07-09T14:14:35.358-0400 I COMMAND [conn160] dropDatabase db49 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.359-0400 m31100| 2015-07-09T14:14:35.358-0400 I COMMAND [conn160] dropDatabase db49 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.359-0400 m30999| 2015-07-09T14:14:35.359-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:35.359-0400-559eba0bca4787b9985d1e06", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465675359), what: "dropDatabase", ns: "db49", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.360-0400 m31102| 2015-07-09T14:14:35.360-0400 I COMMAND [repl writer worker 6] dropDatabase db49 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.360-0400 m31101| 2015-07-09T14:14:35.360-0400 I COMMAND [repl writer worker 6] dropDatabase db49 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.360-0400 m31102| 2015-07-09T14:14:35.360-0400 I COMMAND [repl writer worker 6] dropDatabase db49 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.361-0400 m31101| 2015-07-09T14:14:35.360-0400 I COMMAND [repl writer worker 6] dropDatabase db49 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.479-0400 m31100| 2015-07-09T14:14:35.479-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.483-0400 m31101| 2015-07-09T14:14:35.483-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.483-0400 m31102| 2015-07-09T14:14:35.483-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.515-0400 m31200| 2015-07-09T14:14:35.515-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.517-0400 m31202| 2015-07-09T14:14:35.517-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.518-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.518-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.518-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.518-0400 jstests/concurrency/fsm_workloads/findAndModify_inc.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.519-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.519-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.519-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.519-0400 m31201| 2015-07-09T14:14:35.519-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.528-0400 m30999| 2015-07-09T14:14:35.527-0400 I SHARDING [conn1] distributed lock 'db50/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0bca4787b9985d1e07
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.531-0400 m30999| 2015-07-09T14:14:35.531-0400 I SHARDING [conn1] Placing [db50] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.532-0400 m30999| 2015-07-09T14:14:35.531-0400 I SHARDING [conn1] Enabling sharding for database [db50] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.586-0400 m30999| 2015-07-09T14:14:35.586-0400 I SHARDING [conn1] distributed lock 'db50/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.607-0400 m31100| 2015-07-09T14:14:35.607-0400 I INDEX [conn23] build index on: db50.coll50 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db50.coll50" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.607-0400 m31100| 2015-07-09T14:14:35.607-0400 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.616-0400 m31100| 2015-07-09T14:14:35.616-0400 I INDEX [conn23] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.618-0400 m30999| 2015-07-09T14:14:35.618-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db50.coll50", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.621-0400 m30999| 2015-07-09T14:14:35.621-0400 I SHARDING [conn1] distributed lock 'db50.coll50/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0bca4787b9985d1e08 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.622-0400 m30999| 2015-07-09T14:14:35.622-0400 I SHARDING [conn1] enable sharding on: db50.coll50 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.623-0400 m30999| 2015-07-09T14:14:35.622-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:35.622-0400-559eba0bca4787b9985d1e09", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465675622), what: "shardCollection.start", ns: "db50.coll50", details: { shardKey: { _id: "hashed" }, collection: "db50.coll50", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.630-0400 m31102| 2015-07-09T14:14:35.629-0400 I INDEX [repl writer worker 1] build index on: db50.coll50 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db50.coll50" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.630-0400 m31102| 2015-07-09T14:14:35.630-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.634-0400 m31101| 2015-07-09T14:14:35.634-0400 I INDEX [repl writer worker 3] build index on: db50.coll50 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db50.coll50" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.634-0400 m31101| 2015-07-09T14:14:35.634-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.639-0400 m31102| 2015-07-09T14:14:35.639-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.641-0400 m31101| 2015-07-09T14:14:35.641-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.675-0400 m30999| 2015-07-09T14:14:35.675-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db50.coll50 using new epoch 559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.783-0400 m30999| 2015-07-09T14:14:35.783-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db50.coll50: 0ms sequenceNumber: 220 version: 1|1||559eba0bca4787b9985d1e0a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.839-0400 m30999| 2015-07-09T14:14:35.838-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db50.coll50: 0ms sequenceNumber: 221 version: 1|1||559eba0bca4787b9985d1e0a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.841-0400 m31100| 2015-07-09T14:14:35.840-0400 I SHARDING [conn56] remotely refreshing metadata for db50.coll50 with requested shard version 1|1||559eba0bca4787b9985d1e0a, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.842-0400 m31100| 2015-07-09T14:14:35.842-0400 I SHARDING [conn56] collection db50.coll50 was previously unsharded, new metadata loaded with shard version 1|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.843-0400 m31100| 2015-07-09T14:14:35.842-0400 I SHARDING [conn56] collection version was loaded at version 1|1||559eba0bca4787b9985d1e0a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.843-0400 m30999| 2015-07-09T14:14:35.842-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:35.842-0400-559eba0bca4787b9985d1e0b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465675842), what: "shardCollection", ns: "db50.coll50", details: { version: "1|1||559eba0bca4787b9985d1e0a" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.898-0400 m30999| 2015-07-09T14:14:35.898-0400 I SHARDING [conn1] distributed lock 'db50.coll50/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.899-0400 m30999| 2015-07-09T14:14:35.898-0400 I SHARDING [conn1] moving chunk ns: db50.coll50 moving ( ns: db50.coll50, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.899-0400 m31100| 2015-07-09T14:14:35.899-0400 I SHARDING [conn40] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.900-0400 m31100| 2015-07-09T14:14:35.900-0400 I SHARDING [conn40] received moveChunk request: { moveChunk: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba0bca4787b9985d1e0a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.904-0400 m31100| 2015-07-09T14:14:35.903-0400 I SHARDING [conn40] distributed lock 'db50.coll50/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba0b792e00bb67274a0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.904-0400 m31100| 2015-07-09T14:14:35.904-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:35.904-0400-559eba0b792e00bb67274a0c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465675904), what: "moveChunk.start", ns: "db50.coll50", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.957-0400 m31100| 2015-07-09T14:14:35.957-0400 I SHARDING [conn40] remotely refreshing metadata for db50.coll50 based on current shard version 1|1||559eba0bca4787b9985d1e0a, current metadata version is 1|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.959-0400 m31100| 2015-07-09T14:14:35.959-0400 I SHARDING [conn40] metadata of collection db50.coll50 already up to date (shard version : 1|1||559eba0bca4787b9985d1e0a, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.959-0400 m31100| 2015-07-09T14:14:35.959-0400 I SHARDING [conn40] moveChunk request accepted at version 1|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.960-0400 m31100| 2015-07-09T14:14:35.959-0400 I SHARDING [conn40] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.960-0400 m31200| 2015-07-09T14:14:35.960-0400 I SHARDING [conn16] remotely refreshing metadata for db50.coll50, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.961-0400 m31200| 2015-07-09T14:14:35.961-0400 I SHARDING [conn16] collection db50.coll50 was previously unsharded, new metadata loaded with shard version 0|0||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.962-0400 m31200| 2015-07-09T14:14:35.961-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba0bca4787b9985d1e0a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.962-0400 m31200| 2015-07-09T14:14:35.961-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db50.coll50 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.964-0400 m31100| 2015-07-09T14:14:35.963-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.967-0400 m31100| 2015-07-09T14:14:35.966-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.972-0400 m31100| 2015-07-09T14:14:35.971-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.979-0400 m30998| 2015-07-09T14:14:35.979-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:35.976-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.982-0400 m31100| 2015-07-09T14:14:35.981-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.982-0400 m31200| 2015-07-09T14:14:35.982-0400 I INDEX [migrateThread] build index on: db50.coll50 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db50.coll50" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.983-0400 m31200| 2015-07-09T14:14:35.982-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.990-0400 m31200| 2015-07-09T14:14:35.990-0400 I INDEX [migrateThread] build index on: db50.coll50 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db50.coll50" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.991-0400 m31200| 2015-07-09T14:14:35.990-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:35.999-0400 m31100| 2015-07-09T14:14:35.999-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.007-0400 m31200| 
2015-07-09T14:14:36.007-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.008-0400 m31200| 2015-07-09T14:14:36.008-0400 I SHARDING [migrateThread] Deleter starting delete for: db50.coll50 from { _id: 0 } -> { _id: MaxKey }, with opId: 86254 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.009-0400 m31200| 2015-07-09T14:14:36.008-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db50.coll50 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.022-0400 m31201| 2015-07-09T14:14:36.021-0400 I INDEX [repl writer worker 10] build index on: db50.coll50 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db50.coll50" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.022-0400 m31201| 2015-07-09T14:14:36.021-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.025-0400 m31202| 2015-07-09T14:14:36.024-0400 I INDEX [repl writer worker 7] build index on: db50.coll50 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db50.coll50" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.025-0400 m31202| 2015-07-09T14:14:36.024-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.031-0400 m31201| 2015-07-09T14:14:36.031-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.033-0400 m31100| 2015-07-09T14:14:36.032-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.034-0400 m31200| 2015-07-09T14:14:36.033-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.034-0400 m31200| 2015-07-09T14:14:36.033-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db50.coll50' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.034-0400 m31202| 2015-07-09T14:14:36.033-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.099-0400 m31100| 2015-07-09T14:14:36.098-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.099-0400 m31100| 2015-07-09T14:14:36.098-0400 I SHARDING [conn40] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.099-0400 m31100| 2015-07-09T14:14:36.099-0400 I SHARDING [conn40] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.099-0400 m31100| 2015-07-09T14:14:36.099-0400 I SHARDING [conn40] moveChunk setting version to: 2|0||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.104-0400 m31200| 2015-07-09T14:14:36.103-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db50.coll50' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.104-0400 m31200| 2015-07-09T14:14:36.103-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:36.103-0400-559eba0cd5a107a5b9c0db4c", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465676103), what: "moveChunk.to", ns: "db50.coll50", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 46, step 2 of 5: 24, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 70, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.159-0400 m31100| 2015-07-09T14:14:36.158-0400 I SHARDING [conn40] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.160-0400 m31100| 2015-07-09T14:14:36.159-0400 I SHARDING [conn40] moveChunk updating self version to: 2|1||559eba0bca4787b9985d1e0a through { _id: MinKey } -> { _id: 0 } for collection 'db50.coll50' [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.161-0400 m31100| 2015-07-09T14:14:36.160-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:36.160-0400-559eba0c792e00bb67274a0d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465676160), what: "moveChunk.commit", ns: "db50.coll50", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.214-0400 m31100| 2015-07-09T14:14:36.213-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.214-0400 m31100| 2015-07-09T14:14:36.213-0400 I SHARDING [conn40] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.214-0400 m31100| 2015-07-09T14:14:36.214-0400 I SHARDING [conn40] Deleter starting delete for: db50.coll50 from { _id: 0 } -> { _id: MaxKey }, with opId: 129909 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:36.215-0400 m31100| 2015-07-09T14:14:36.214-0400 I SHARDING [conn40] rangeDeleter deleted 0 documents for db50.coll50 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.215-0400 m31100| 2015-07-09T14:14:36.214-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.215-0400 m31100| 2015-07-09T14:14:36.215-0400 I SHARDING [conn40] distributed lock 'db50.coll50/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.216-0400 m31100| 2015-07-09T14:14:36.215-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:36.215-0400-559eba0c792e00bb67274a0e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465676215), what: "moveChunk.from", ns: "db50.coll50", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 2, step 4 of 6: 136, step 5 of 6: 115, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.269-0400 m31100| 2015-07-09T14:14:36.268-0400 I COMMAND [conn40] command db50.coll50 command: moveChunk { moveChunk: "db50.coll50", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba0bca4787b9985d1e0a') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 368ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.271-0400 m30999| 2015-07-09T14:14:36.270-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db50.coll50: 0ms sequenceNumber: 222 version: 2|1||559eba0bca4787b9985d1e0a based on: 1|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.272-0400 m31100| 2015-07-09T14:14:36.271-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db50.coll50", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba0bca4787b9985d1e0a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.277-0400 m31100| 2015-07-09T14:14:36.277-0400 I SHARDING [conn40] distributed lock 'db50.coll50/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba0c792e00bb67274a0f [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.277-0400 m31100| 2015-07-09T14:14:36.277-0400 I SHARDING [conn40] remotely refreshing metadata for db50.coll50 based on current shard version 2|0||559eba0bca4787b9985d1e0a, current metadata version is 2|0||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.279-0400 m31100| 2015-07-09T14:14:36.278-0400 I SHARDING [conn40] updating metadata for db50.coll50 from shard version 2|0||559eba0bca4787b9985d1e0a to shard version 2|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.279-0400 m31100| 2015-07-09T14:14:36.278-0400 I 
SHARDING [conn40] collection version was loaded at version 2|1||559eba0bca4787b9985d1e0a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.279-0400 m31100| 2015-07-09T14:14:36.278-0400 I SHARDING [conn40] splitChunk accepted at version 2|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.280-0400 m31100| 2015-07-09T14:14:36.280-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:36.280-0400-559eba0c792e00bb67274a10", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465676280), what: "split", ns: "db50.coll50", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba0bca4787b9985d1e0a') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba0bca4787b9985d1e0a') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.333-0400 m31100| 2015-07-09T14:14:36.333-0400 I SHARDING [conn40] distributed lock 'db50.coll50/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.335-0400 m30999| 2015-07-09T14:14:36.335-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db50.coll50: 0ms sequenceNumber: 223 version: 2|3||559eba0bca4787b9985d1e0a based on: 2|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.336-0400 m31200| 2015-07-09T14:14:36.335-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db50.coll50", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba0bca4787b9985d1e0a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.341-0400 m31200| 2015-07-09T14:14:36.340-0400 I SHARDING [conn18] distributed lock 'db50.coll50/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba0cd5a107a5b9c0db4d [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.341-0400 m31200| 2015-07-09T14:14:36.340-0400 I SHARDING [conn18] remotely refreshing metadata for db50.coll50 based on current shard version 0|0||559eba0bca4787b9985d1e0a, current metadata version is 1|1||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.342-0400 m31200| 2015-07-09T14:14:36.342-0400 I SHARDING [conn18] updating metadata for db50.coll50 from shard version 0|0||559eba0bca4787b9985d1e0a to shard version 2|0||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.342-0400 m31200| 2015-07-09T14:14:36.342-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba0bca4787b9985d1e0a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.342-0400 m31200| 2015-07-09T14:14:36.342-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.344-0400 m31200| 2015-07-09T14:14:36.343-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:36.343-0400-559eba0cd5a107a5b9c0db4e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465676343), what: "split", ns: "db50.coll50", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eba0bca4787b9985d1e0a') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba0bca4787b9985d1e0a') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.398-0400 m31200| 2015-07-09T14:14:36.398-0400 I SHARDING [conn18] distributed lock 'db50.coll50/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.400-0400 m30999| 2015-07-09T14:14:36.400-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db50.coll50: 0ms sequenceNumber: 224 version: 2|5||559eba0bca4787b9985d1e0a based on: 2|3||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.403-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.513-0400 m30999| 2015-07-09T14:14:36.513-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63693 #307 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.627-0400 m30998| 2015-07-09T14:14:36.626-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63695 #307 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.629-0400 m30999| 2015-07-09T14:14:36.626-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63694 #308 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.629-0400 m30999| 2015-07-09T14:14:36.629-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63696 #309 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.630-0400 m30998| 2015-07-09T14:14:36.630-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63697 #308 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.630-0400 m30999| 2015-07-09T14:14:36.630-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63698 #310 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.636-0400 m30999| 2015-07-09T14:14:36.634-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63701 #311 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.636-0400 m30998| 2015-07-09T14:14:36.636-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63699 #309 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.637-0400 m30998| 2015-07-09T14:14:36.636-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63700 #310 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.637-0400 m30998| 2015-07-09T14:14:36.637-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63702 #311 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.640-0400 m30999| 2015-07-09T14:14:36.640-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63705 #312 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.641-0400 m30998| 2015-07-09T14:14:36.641-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63703 #312 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.644-0400 m30998| 2015-07-09T14:14:36.644-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63704 #313 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.644-0400 m30998| 2015-07-09T14:14:36.644-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63709 #314 (9 connections now open) 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.645-0400 m30998| 2015-07-09T14:14:36.644-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63710 #315 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.646-0400 m30999| 2015-07-09T14:14:36.646-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63706 #313 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.647-0400 m30998| 2015-07-09T14:14:36.646-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63712 #316 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.647-0400 m30999| 2015-07-09T14:14:36.646-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63707 #314 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.647-0400 m30999| 2015-07-09T14:14:36.646-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63708 #315 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.647-0400 m30999| 2015-07-09T14:14:36.646-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63711 #316 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.657-0400 setting random seed: 5572537425905 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.657-0400 setting random seed: 4706522445194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.657-0400 setting random seed: 1299856128171 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.657-0400 setting random seed: 5151375560089 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.659-0400 setting random seed: 2522773216478 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.659-0400 setting random seed: 7677464825101 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.660-0400 m30998| 2015-07-09T14:14:36.659-0400 I SHARDING [conn308] ChunkManager: time to load chunks for db50.coll50: 0ms sequenceNumber: 61 version: 2|5||559eba0bca4787b9985d1e0a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.661-0400 setting random seed: 249469098635 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.661-0400 setting random seed: 6136118229478 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.661-0400 setting random seed: 9739130865782 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.662-0400 setting random seed: 7636560802347 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.663-0400 setting random seed: 4648285172879 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.665-0400 setting random seed: 3728936966508 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.666-0400 m31100| 2015-07-09T14:14:36.665-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63713 #175 (99 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.666-0400 setting random seed: 6681956341490 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.666-0400 setting random seed: 9169078744016 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.667-0400 setting random seed: 2248488767072 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.667-0400 setting random seed: 2827142709866 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.668-0400 setting random seed: 3640283718705 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.669-0400 m31100| 2015-07-09T14:14:36.668-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63714 
#176 (100 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.671-0400 m31100| 2015-07-09T14:14:36.669-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63715 #177 (101 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.671-0400 setting random seed: 350725785829 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.671-0400 m31100| 2015-07-09T14:14:36.671-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63716 #178 (102 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.672-0400 setting random seed: 2243625293485 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.672-0400 setting random seed: 5787546844221 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.673-0400 m31100| 2015-07-09T14:14:36.673-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63717 #179 (103 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.674-0400 m31100| 2015-07-09T14:14:36.674-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63718 #180 (104 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.674-0400 m31100| 2015-07-09T14:14:36.674-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63719 #181 (105 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.678-0400 m31100| 2015-07-09T14:14:36.678-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63720 #182 (106 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.680-0400 m31100| 2015-07-09T14:14:36.680-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63721 #183 (107 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.683-0400 m31100| 2015-07-09T14:14:36.682-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63722 #184 (108 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.684-0400 m31100| 2015-07-09T14:14:36.684-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63723 #185 (109 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.685-0400 m31100| 2015-07-09T14:14:36.685-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63724 #186 (110 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.752-0400 m30999| 2015-07-09T14:14:36.751-0400 I NETWORK [conn312] end connection 127.0.0.1:63705 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.757-0400 m30998| 2015-07-09T14:14:36.756-0400 I NETWORK [conn313] end connection 127.0.0.1:63704 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.765-0400 m30998| 2015-07-09T14:14:36.764-0400 I NETWORK [conn311] end connection 127.0.0.1:63702 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.779-0400 m30999| 2015-07-09T14:14:36.773-0400 I NETWORK [conn310] end connection 127.0.0.1:63698 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.780-0400 m30998| 2015-07-09T14:14:36.773-0400 I NETWORK [conn308] end connection 127.0.0.1:63697 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.794-0400 m31100| 2015-07-09T14:14:36.793-0400 I COMMAND [conn47] command db50.$cmd command: findAndModify { findandmodify: "coll50", query: { _id: "findAndModify_inc" }, update: { $inc: { t2: 1.0 } } } update: { $inc: { t2: 1.0 
} } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:29 numYields:29 reslen:393 locks:{ Global: { acquireCount: { r: 31, w: 31 } }, Database: { acquireCount: { w: 31 } }, Collection: { acquireCount: { w: 30 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.795-0400 m31100| 2015-07-09T14:14:36.795-0400 I COMMAND [conn58] command db50.$cmd command: findAndModify { findandmodify: "coll50", query: { _id: "findAndModify_inc" }, update: { $inc: { t5: 1.0 } } } update: { $inc: { t5: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:31 numYields:31 reslen:393 locks:{ Global: { acquireCount: { r: 33, w: 33 } }, Database: { acquireCount: { w: 33 } }, Collection: { acquireCount: { w: 32 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.803-0400 m31100| 2015-07-09T14:14:36.802-0400 I COMMAND [conn49] command db50.$cmd command: findAndModify { findandmodify: "coll50", query: { _id: "findAndModify_inc" }, update: { $inc: { t17: 1.0 } } } update: { $inc: { t17: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:28 numYields:28 reslen:405 locks:{ Global: { acquireCount: { r: 30, w: 30 } }, Database: { acquireCount: { w: 30 } }, Collection: { acquireCount: { w: 29 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.807-0400 m31100| 2015-07-09T14:14:36.806-0400 I COMMAND [conn180] command db50.$cmd command: findAndModify { findandmodify: "coll50", query: { _id: "findAndModify_inc" }, update: { $inc: { t10: 1.0 } } } update: { $inc: { t10: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:30 numYields:30 reslen:405 locks:{ Global: { acquireCount: { r: 32, w: 32 } }, Database: { acquireCount: { w: 32 } }, Collection: { acquireCount: { w: 31 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.807-0400 m30999| 2015-07-09T14:14:36.807-0400 I NETWORK [conn308] end connection 127.0.0.1:63694 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.811-0400 m30999| 2015-07-09T14:14:36.810-0400 I NETWORK [conn309] end connection 127.0.0.1:63696 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.819-0400 m30999| 2015-07-09T14:14:36.819-0400 I NETWORK [conn314] end connection 127.0.0.1:63707 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.825-0400 m31100| 2015-07-09T14:14:36.825-0400 I COMMAND [conn56] command db50.$cmd command: findAndModify { findandmodify: "coll50", query: { _id: "findAndModify_inc" }, update: { $inc: { t16: 1.0 } } } update: { $inc: { t16: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:27 numYields:27 reslen:418 locks:{ Global: { acquireCount: { r: 29, w: 29 } }, Database: { acquireCount: { w: 29 } }, Collection: { acquireCount: { w: 28 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.833-0400 m30998| 2015-07-09T14:14:36.832-0400 I NETWORK [conn315] end connection 127.0.0.1:63710 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.833-0400 m30998| 2015-07-09T14:14:36.833-0400 I NETWORK 
[conn310] end connection 127.0.0.1:63700 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.857-0400 m30999| 2015-07-09T14:14:36.856-0400 I NETWORK [conn311] end connection 127.0.0.1:63701 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.858-0400 m30999| 2015-07-09T14:14:36.857-0400 I NETWORK [conn307] end connection 127.0.0.1:63693 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.859-0400 m30998| 2015-07-09T14:14:36.858-0400 I NETWORK [conn314] end connection 127.0.0.1:63709 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.864-0400 m31100| 2015-07-09T14:14:36.863-0400 I COMMAND [conn181] command db50.$cmd command: findAndModify { findandmodify: "coll50", query: { _id: "findAndModify_inc" }, update: { $inc: { t1: 1.0 } } } update: { $inc: { t1: 1.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:25 numYields:25 reslen:418 locks:{ Global: { acquireCount: { r: 27, w: 27 } }, Database: { acquireCount: { w: 27 } }, Collection: { acquireCount: { w: 26 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.875-0400 m30998| 2015-07-09T14:14:36.875-0400 I NETWORK [conn316] end connection 127.0.0.1:63712 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.891-0400 m30998| 2015-07-09T14:14:36.891-0400 I NETWORK [conn312] end connection 127.0.0.1:63703 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.893-0400 m30998| 2015-07-09T14:14:36.893-0400 I NETWORK [conn307] end connection 127.0.0.1:63695 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.894-0400 m31100| 2015-07-09T14:14:36.894-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:36.891-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.910-0400 m30998| 2015-07-09T14:14:36.904-0400 I NETWORK [conn309] end connection 127.0.0.1:63699 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.910-0400 m30999| 2015-07-09T14:14:36.904-0400 I NETWORK [conn313] end connection 127.0.0.1:63706 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.914-0400 m30999| 2015-07-09T14:14:36.914-0400 I NETWORK [conn315] end connection 127.0.0.1:63708 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.933-0400 m30999| 2015-07-09T14:14:36.933-0400 I NETWORK [conn316] end connection 127.0.0.1:63711 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.949-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.950-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.950-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.950-0400 jstests/concurrency/fsm_workloads/findAndModify_inc.js: Workload completed in 546 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.950-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.950-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.950-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:36.950-0400 m30999| 2015-07-09T14:14:36.950-0400 I COMMAND [conn1] DROP: db50.coll50 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:36.950-0400 m30999| 2015-07-09T14:14:36.950-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:36.950-0400-559eba0cca4787b9985d1e0c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465676950), what: "dropCollection.start", ns: "db50.coll50", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.008-0400 m30999| 2015-07-09T14:14:37.008-0400 I SHARDING [conn1] distributed lock 'db50.coll50/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0dca4787b9985d1e0d [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.009-0400 m31100| 2015-07-09T14:14:37.009-0400 I COMMAND [conn40] CMD: drop db50.coll50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.013-0400 m31200| 2015-07-09T14:14:37.012-0400 I COMMAND [conn18] CMD: drop db50.coll50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.014-0400 m31102| 2015-07-09T14:14:37.014-0400 I COMMAND [repl writer worker 0] CMD: drop db50.coll50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.014-0400 m31101| 2015-07-09T14:14:37.014-0400 I COMMAND [repl writer worker 2] CMD: drop db50.coll50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.016-0400 m31202| 2015-07-09T14:14:37.016-0400 I COMMAND [repl writer worker 15] CMD: drop db50.coll50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.017-0400 m31201| 2015-07-09T14:14:37.016-0400 I COMMAND [repl writer worker 9] CMD: drop db50.coll50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.071-0400 m31100| 2015-07-09T14:14:37.070-0400 I SHARDING [conn40] remotely refreshing metadata for db50.coll50 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba0bca4787b9985d1e0a, current metadata version is 2|3||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.073-0400 m31100| 2015-07-09T14:14:37.072-0400 W SHARDING [conn40] no chunks found when reloading db50.coll50, previous version was 0|0||559eba0bca4787b9985d1e0a, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.073-0400 m31100| 2015-07-09T14:14:37.072-0400 I SHARDING [conn40] dropping metadata for db50.coll50 at shard version 2|3||559eba0bca4787b9985d1e0a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.074-0400 m31200| 2015-07-09T14:14:37.074-0400 I SHARDING [conn18] remotely refreshing metadata for db50.coll50 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba0bca4787b9985d1e0a, current metadata version is 2|5||559eba0bca4787b9985d1e0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.076-0400 m31200| 2015-07-09T14:14:37.075-0400 W SHARDING [conn18] no chunks found when reloading db50.coll50, previous version was 0|0||559eba0bca4787b9985d1e0a, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.076-0400 m31200| 2015-07-09T14:14:37.076-0400 I SHARDING [conn18] dropping metadata for db50.coll50 at shard version 2|5||559eba0bca4787b9985d1e0a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.077-0400 m30999| 2015-07-09T14:14:37.076-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.076-0400-559eba0dca4787b9985d1e0e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465677076), what: "dropCollection", ns: "db50.coll50", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.131-0400 m30999| 2015-07-09T14:14:37.130-0400 
I SHARDING [conn1] distributed lock 'db50.coll50/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.187-0400 m30999| 2015-07-09T14:14:37.187-0400 I COMMAND [conn1] DROP DATABASE: db50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.188-0400 m30999| 2015-07-09T14:14:37.187-0400 I SHARDING [conn1] DBConfig::dropDatabase: db50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.188-0400 m30999| 2015-07-09T14:14:37.187-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.187-0400-559eba0dca4787b9985d1e0f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465677187), what: "dropDatabase.start", ns: "db50", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.295-0400 m30999| 2015-07-09T14:14:37.294-0400 I SHARDING [conn1] DBConfig::dropDatabase: db50 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.295-0400 m31100| 2015-07-09T14:14:37.295-0400 I COMMAND [conn160] dropDatabase db50 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.296-0400 m31100| 2015-07-09T14:14:37.295-0400 I COMMAND [conn160] dropDatabase db50 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.296-0400 m30999| 2015-07-09T14:14:37.296-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.296-0400-559eba0dca4787b9985d1e10", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465677296), what: "dropDatabase", ns: "db50", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.297-0400 m31102| 2015-07-09T14:14:37.297-0400 I COMMAND [repl writer worker 11] dropDatabase db50 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.297-0400 m31102| 2015-07-09T14:14:37.297-0400 I COMMAND [repl writer worker 11] dropDatabase db50 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.297-0400 m31101| 2015-07-09T14:14:37.297-0400 I COMMAND [repl writer worker 11] dropDatabase db50 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.298-0400 m31101| 2015-07-09T14:14:37.297-0400 I COMMAND [repl writer worker 11] dropDatabase db50 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.308-0400 m31200| 2015-07-09T14:14:37.308-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:14:37.300-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.341-0400 m31100| 2015-07-09T14:14:37.341-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.345-0400 m31101| 2015-07-09T14:14:37.345-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.346-0400 m31102| 2015-07-09T14:14:37.345-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.379-0400 m31200| 2015-07-09T14:14:37.379-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.383-0400 m31202| 2015-07-09T14:14:37.382-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.383-0400 m31201| 2015-07-09T14:14:37.383-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.383-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.383-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.383-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.383-0400 jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.384-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.384-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.384-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.393-0400 m30999| 2015-07-09T14:14:37.392-0400 I SHARDING [conn1] distributed lock 'db51/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0dca4787b9985d1e11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.396-0400 m30999| 2015-07-09T14:14:37.396-0400 I SHARDING [conn1] Placing [db51] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.397-0400 m30999| 2015-07-09T14:14:37.396-0400 I SHARDING [conn1] Enabling sharding for database [db51] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.454-0400 m30999| 2015-07-09T14:14:37.453-0400 I SHARDING [conn1] distributed lock 'db51/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.476-0400 m31100| 2015-07-09T14:14:37.476-0400 I INDEX [conn23] build index on: db51.coll51 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db51.coll51" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.477-0400 m31100| 2015-07-09T14:14:37.476-0400 I INDEX [conn23] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.484-0400 m31100| 2015-07-09T14:14:37.484-0400 I INDEX [conn23] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.486-0400 m30999| 2015-07-09T14:14:37.485-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db51.coll51", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.501-0400 m30999| 2015-07-09T14:14:37.501-0400 I SHARDING [conn1] distributed lock 'db51.coll51/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0dca4787b9985d1e12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.502-0400 m30999| 2015-07-09T14:14:37.502-0400 I SHARDING [conn1] enable sharding on: db51.coll51 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.503-0400 m30999| 2015-07-09T14:14:37.502-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.502-0400-559eba0dca4787b9985d1e13", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465677502), what: "shardCollection.start", ns: "db51.coll51", details: { shardKey: { _id: "hashed" }, collection: "db51.coll51", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.504-0400 m31102| 2015-07-09T14:14:37.503-0400 I INDEX [repl writer worker 5] build index on: db51.coll51 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db51.coll51" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.504-0400 m31102| 2015-07-09T14:14:37.503-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.508-0400 m31101| 2015-07-09T14:14:37.507-0400 I INDEX [repl writer worker 12] build index on: db51.coll51 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db51.coll51" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.508-0400 m31101| 2015-07-09T14:14:37.508-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.513-0400 m31102| 2015-07-09T14:14:37.513-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.515-0400 m31101| 2015-07-09T14:14:37.515-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.555-0400 m30999| 2015-07-09T14:14:37.555-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db51.coll51 using new epoch 559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.663-0400 m30999| 2015-07-09T14:14:37.663-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db51.coll51: 0ms sequenceNumber: 225 version: 1|1||559eba0dca4787b9985d1e14 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.719-0400 m30999| 2015-07-09T14:14:37.719-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db51.coll51: 0ms sequenceNumber: 226 version: 1|1||559eba0dca4787b9985d1e14 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.721-0400 m31100| 2015-07-09T14:14:37.721-0400 I SHARDING [conn175] remotely refreshing metadata for db51.coll51 with requested shard version 1|1||559eba0dca4787b9985d1e14, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.723-0400 m31100| 2015-07-09T14:14:37.722-0400 I SHARDING [conn175] collection db51.coll51 was previously unsharded, new metadata loaded with shard version 1|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.723-0400 m31100| 2015-07-09T14:14:37.723-0400 I SHARDING [conn175] collection version was loaded at version 1|1||559eba0dca4787b9985d1e14, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.723-0400 m30999| 2015-07-09T14:14:37.723-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.723-0400-559eba0dca4787b9985d1e15", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465677723), what: "shardCollection", ns: "db51.coll51", details: { version: "1|1||559eba0dca4787b9985d1e14" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.777-0400 m30999| 2015-07-09T14:14:37.776-0400 I SHARDING [conn1] distributed lock 'db51.coll51/bs-osx108-8:30999:1436464534:16807' unlocked.
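The sequence above is the standard per-workload setup: mongos takes the distributed lock on db51, places the database on test-rs0, builds the hashed _id index, and registers two initial chunks under epoch 559eba0dca4787b9985d1e14. As a rough mongo-shell equivalent of what the harness issues here, a minimal sketch; enableSharding and shardCollection are the actual admin commands, while the host/port is an assumption taken from the mongos (m30999) in this run:

    // Minimal sketch of the setup commands logged above.
    var mongos = new Mongo("bs-osx108-8:30999"); // assumed mongos address
    var admin = mongos.getDB("admin");
    assert.commandWorked(admin.runCommand({ enableSharding: "db51" }));
    // Hashed _id sharding; this run ends up with numChunks: 2, balanced
    // one per shard by the moveChunk that follows in the log.
    assert.commandWorked(admin.runCommand({
        shardCollection: "db51.coll51",
        key: { _id: "hashed" }
    }));
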
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.778-0400 m30999| 2015-07-09T14:14:37.777-0400 I SHARDING [conn1] moving chunk ns: db51.coll51 moving ( ns: db51.coll51, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.778-0400 m31100| 2015-07-09T14:14:37.778-0400 I SHARDING [conn40] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.779-0400 m31100| 2015-07-09T14:14:37.779-0400 I SHARDING [conn40] received moveChunk request: { moveChunk: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba0dca4787b9985d1e14') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.784-0400 m31100| 2015-07-09T14:14:37.784-0400 I SHARDING [conn40] distributed lock 'db51.coll51/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba0d792e00bb67274a12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.785-0400 m31100| 2015-07-09T14:14:37.784-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.784-0400-559eba0d792e00bb67274a13", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465677784), what: "moveChunk.start", ns: "db51.coll51", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.838-0400 m31100| 2015-07-09T14:14:37.837-0400 I SHARDING [conn40] remotely refreshing metadata for db51.coll51 based on current shard version 1|1||559eba0dca4787b9985d1e14, current metadata version is 1|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.839-0400 m31100| 2015-07-09T14:14:37.839-0400 I SHARDING [conn40] metadata of collection db51.coll51 already up to date (shard version : 1|1||559eba0dca4787b9985d1e14, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.840-0400 m31100| 2015-07-09T14:14:37.839-0400 I SHARDING [conn40] moveChunk request accepted at version 1|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.840-0400 m31100| 2015-07-09T14:14:37.840-0400 I SHARDING [conn40] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.841-0400 m31200| 2015-07-09T14:14:37.840-0400 I SHARDING [conn16] remotely refreshing metadata for db51.coll51, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.842-0400 m31200| 2015-07-09T14:14:37.842-0400 I SHARDING [conn16] collection db51.coll51 was previously unsharded, new metadata loaded with shard version 0|0||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.843-0400 m31200| 2015-07-09T14:14:37.842-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba0dca4787b9985d1e14, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.843-0400 m31200| 2015-07-09T14:14:37.842-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db51.coll51 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.845-0400 m31100| 2015-07-09T14:14:37.844-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.849-0400 m31100| 2015-07-09T14:14:37.848-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.853-0400 m31100| 2015-07-09T14:14:37.853-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.860-0400 m31200| 2015-07-09T14:14:37.859-0400 I INDEX [migrateThread] build index on: db51.coll51 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db51.coll51" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.860-0400 m31200| 2015-07-09T14:14:37.859-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.863-0400 m31100| 2015-07-09T14:14:37.862-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.870-0400 m31200| 2015-07-09T14:14:37.869-0400 I INDEX [migrateThread] build index on: db51.coll51 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db51.coll51" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.870-0400 m31200| 2015-07-09T14:14:37.870-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.880-0400 m31200| 2015-07-09T14:14:37.880-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.881-0400 m31200| 2015-07-09T14:14:37.881-0400 I SHARDING [migrateThread] Deleter starting delete for: db51.coll51 from { _id: 0 } -> { _id: MaxKey }, with opId: 86496
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.881-0400 m31100| 2015-07-09T14:14:37.881-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.882-0400 m31200| 2015-07-09T14:14:37.881-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db51.coll51 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.889-0400 m31201| 2015-07-09T14:14:37.888-0400 I INDEX [repl writer worker 7] build index on: db51.coll51 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db51.coll51" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.889-0400 m31201| 2015-07-09T14:14:37.888-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.889-0400 m31202| 2015-07-09T14:14:37.889-0400 I INDEX [repl writer worker 10] build index on: db51.coll51 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db51.coll51" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.889-0400 m31202| 2015-07-09T14:14:37.889-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.893-0400 m31201| 2015-07-09T14:14:37.893-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.896-0400 m31200| 2015-07-09T14:14:37.896-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.896-0400 m31200| 2015-07-09T14:14:37.896-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db51.coll51' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.897-0400 m31202| 2015-07-09T14:14:37.896-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.915-0400 m31100| 2015-07-09T14:14:37.915-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.916-0400 m31100| 2015-07-09T14:14:37.915-0400 I SHARDING [conn40] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.916-0400 m31100| 2015-07-09T14:14:37.916-0400 I SHARDING [conn40] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.916-0400 m31100| 2015-07-09T14:14:37.916-0400 I SHARDING [conn40] moveChunk setting version to: 2|0||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.919-0400 m31200| 2015-07-09T14:14:37.919-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db51.coll51' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.920-0400 m31200| 2015-07-09T14:14:37.919-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.919-0400-559eba0dd5a107a5b9c0db4f", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465677919), what: "moveChunk.to", ns: "db51.coll51", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 37, step 2 of 5: 13, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.977-0400 m31100| 2015-07-09T14:14:37.976-0400 I SHARDING [conn40] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.977-0400 m31100| 2015-07-09T14:14:37.977-0400 I SHARDING [conn40] moveChunk updating self version to: 2|1||559eba0dca4787b9985d1e14 through { _id: MinKey } -> { _id: 0 } for collection 'db51.coll51'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:37.979-0400 m31100| 2015-07-09T14:14:37.978-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:37.978-0400-559eba0d792e00bb67274a14", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465677978), what: "moveChunk.commit", ns: "db51.coll51", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.036-0400 m31100| 2015-07-09T14:14:38.036-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.037-0400 m31100| 2015-07-09T14:14:38.036-0400 I SHARDING [conn40] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.037-0400 m31100| 2015-07-09T14:14:38.036-0400 I SHARDING [conn40] Deleter starting delete for: db51.coll51 from { _id: 0 } -> { _id: MaxKey }, with opId: 130668
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.037-0400 m31100| 2015-07-09T14:14:38.036-0400 I SHARDING [conn40] rangeDeleter deleted 0 documents for db51.coll51 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.037-0400 m31100| 2015-07-09T14:14:38.036-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.039-0400 m31100| 2015-07-09T14:14:38.039-0400 I SHARDING [conn40] distributed lock 'db51.coll51/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.039-0400 m31100| 2015-07-09T14:14:38.039-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:38.039-0400-559eba0e792e00bb67274a15", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465678039), what: "moveChunk.from", ns: "db51.coll51", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 60, step 3 of 6: 3, step 4 of 6: 72, step 5 of 6: 120, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.093-0400 m31100| 2015-07-09T14:14:38.092-0400 I COMMAND [conn40] command db51.coll51 command: moveChunk { moveChunk: "db51.coll51", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba0dca4787b9985d1e14') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 314ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.094-0400 m30999| 2015-07-09T14:14:38.094-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db51.coll51: 0ms sequenceNumber: 227 version: 2|1||559eba0dca4787b9985d1e14 based on: 1|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.096-0400 m31100| 2015-07-09T14:14:38.095-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db51.coll51", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba0dca4787b9985d1e14') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.099-0400 m31100| 2015-07-09T14:14:38.099-0400 I SHARDING [conn40] distributed lock 'db51.coll51/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba0e792e00bb67274a16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.099-0400 m31100| 2015-07-09T14:14:38.099-0400 I SHARDING [conn40] remotely refreshing metadata for db51.coll51 based on current shard version 2|0||559eba0dca4787b9985d1e14, current metadata version is 2|0||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.101-0400 m31100| 2015-07-09T14:14:38.100-0400 I SHARDING [conn40] updating metadata for db51.coll51 from shard version 2|0||559eba0dca4787b9985d1e14 to shard version 2|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.101-0400 m31100| 2015-07-09T14:14:38.100-0400 I SHARDING [conn40] collection version was loaded at version 2|1||559eba0dca4787b9985d1e14, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.101-0400 m31100| 2015-07-09T14:14:38.100-0400 I SHARDING [conn40] splitChunk accepted at version 2|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.102-0400 m31100| 2015-07-09T14:14:38.102-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:38.102-0400-559eba0e792e00bb67274a17", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465678102), what: "split", ns: "db51.coll51", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba0dca4787b9985d1e14') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba0dca4787b9985d1e14') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.157-0400 m31100| 2015-07-09T14:14:38.157-0400 I SHARDING [conn40] distributed lock 'db51.coll51/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.159-0400 m30999| 2015-07-09T14:14:38.159-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db51.coll51: 1ms sequenceNumber: 228 version: 2|3||559eba0dca4787b9985d1e14 based on: 2|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.160-0400 m31200| 2015-07-09T14:14:38.159-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db51.coll51", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba0dca4787b9985d1e14') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.165-0400 m31200| 2015-07-09T14:14:38.164-0400 I SHARDING [conn18] distributed lock 'db51.coll51/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba0ed5a107a5b9c0db50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.165-0400 m31200| 2015-07-09T14:14:38.164-0400 I SHARDING [conn18] remotely refreshing metadata for db51.coll51 based on current shard version 0|0||559eba0dca4787b9985d1e14, current metadata version is 1|1||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.166-0400 m31200| 2015-07-09T14:14:38.166-0400 I SHARDING [conn18] updating metadata for db51.coll51 from shard version 0|0||559eba0dca4787b9985d1e14 to shard version 2|0||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.167-0400 m31200| 2015-07-09T14:14:38.166-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba0dca4787b9985d1e14, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.167-0400 m31200| 2015-07-09T14:14:38.166-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.168-0400 m31200| 2015-07-09T14:14:38.167-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:38.167-0400-559eba0ed5a107a5b9c0db51", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465678167), what: "split", ns: "db51.coll51", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba0dca4787b9985d1e14') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba0dca4787b9985d1e14') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.222-0400 m31200| 2015-07-09T14:14:38.222-0400 I SHARDING [conn18] distributed lock 'db51.coll51/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.224-0400 m30999| 2015-07-09T14:14:38.224-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db51.coll51: 0ms sequenceNumber: 229 version: 2|5||559eba0dca4787b9985d1e14 based on: 2|3||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.233-0400 m31200| 2015-07-09T14:14:38.232-0400 I INDEX [conn41] build index on: db51.coll51 properties: { v: 1, key: { indexed_insert_2dsphere: "2dsphere" }, name: "indexed_insert_2dsphere_2dsphere", ns: "db51.coll51", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.233-0400 m31200| 2015-07-09T14:14:38.232-0400 I INDEX [conn41] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.234-0400 m31100| 2015-07-09T14:14:38.233-0400 I INDEX [conn175] build index on: db51.coll51 properties: { v: 1, key: { indexed_insert_2dsphere: "2dsphere" }, name: "indexed_insert_2dsphere_2dsphere", ns: "db51.coll51", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.234-0400 m31100| 2015-07-09T14:14:38.233-0400 I INDEX [conn175] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.236-0400 m31200| 2015-07-09T14:14:38.236-0400 I INDEX [conn41] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.244-0400 m31100| 2015-07-09T14:14:38.243-0400 I INDEX [conn175] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.244-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.262-0400 m31201| 2015-07-09T14:14:38.259-0400 I INDEX [repl writer worker 4] build index on: db51.coll51 properties: { v: 1, key: { indexed_insert_2dsphere: "2dsphere" }, name: "indexed_insert_2dsphere_2dsphere", ns: "db51.coll51", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.262-0400 m31201| 2015-07-09T14:14:38.260-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.263-0400 m31202| 2015-07-09T14:14:38.262-0400 I INDEX [repl writer worker 5] build index on: db51.coll51 properties: { v: 1, key: { indexed_insert_2dsphere: "2dsphere" }, name: "indexed_insert_2dsphere_2dsphere", ns: "db51.coll51", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.275-0400 m31202| 2015-07-09T14:14:38.262-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.340-0400 m31101| 2015-07-09T14:14:38.334-0400 I INDEX [repl writer worker 0] build index on: db51.coll51 properties: { v: 1, key: { indexed_insert_2dsphere: "2dsphere" }, name: "indexed_insert_2dsphere_2dsphere", ns: "db51.coll51", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.341-0400 m31101| 2015-07-09T14:14:38.334-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.407-0400 m31102| 2015-07-09T14:14:38.401-0400 I INDEX [repl writer worker 9] build index on: db51.coll51 properties: { v: 1, key: { indexed_insert_2dsphere: "2dsphere" }, name: "indexed_insert_2dsphere_2dsphere", ns: "db51.coll51", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.407-0400 m31102| 2015-07-09T14:14:38.401-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.408-0400 m31202| 2015-07-09T14:14:38.408-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.420-0400 m31101| 2015-07-09T14:14:38.419-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.420-0400 m31201| 2015-07-09T14:14:38.420-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.452-0400 m31102| 2015-07-09T14:14:38.452-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.505-0400 m30998| 2015-07-09T14:14:38.504-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63725 #317 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.509-0400 m30999| 2015-07-09T14:14:38.505-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63726 #317 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.516-0400 m30999| 2015-07-09T14:14:38.514-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63728 #318 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.516-0400 m30998| 2015-07-09T14:14:38.514-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63727 #318 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.518-0400 m30998| 2015-07-09T14:14:38.518-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63729 #319 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.527-0400 m30998| 2015-07-09T14:14:38.519-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63730 #320 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.527-0400 m30999| 2015-07-09T14:14:38.524-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63731 #319 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.529-0400 m30998| 2015-07-09T14:14:38.529-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63733 #321 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.531-0400 m30999| 2015-07-09T14:14:38.530-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63732 #320 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.531-0400 m30999| 2015-07-09T14:14:38.530-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63734 #321 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.534-0400 m30998| 2015-07-09T14:14:38.534-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63738 #322 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.541-0400 m30999| 2015-07-09T14:14:38.541-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63735 #322 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.542-0400 m30999| 2015-07-09T14:14:38.541-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63736 #323 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.542-0400 m30999| 2015-07-09T14:14:38.541-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63737 #324 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.542-0400 m30999| 2015-07-09T14:14:38.541-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63740 #325 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.545-0400 m30998| 2015-07-09T14:14:38.544-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63739 #323 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.552-0400 m30999| 2015-07-09T14:14:38.546-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63742 #326 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.553-0400 m30998| 2015-07-09T14:14:38.553-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63741 #324 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.557-0400 m30998| 2015-07-09T14:14:38.556-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63743 #325 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.557-0400 m30998| 2015-07-09T14:14:38.556-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63744 #326 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.572-0400 setting random seed: 9259920194745
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.572-0400 setting random seed: 5046327873133
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.572-0400 setting random seed: 6048031169921
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.573-0400 setting random seed: 5534428842365
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.574-0400 setting random seed: 3948769802227
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.574-0400 setting random seed: 3647339115850
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.575-0400 setting random seed: 1478785551153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.577-0400 setting random seed: 7954707932658
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.580-0400 setting random seed: 3735226672142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.595-0400 setting random seed: 5126190162263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.596-0400 setting random seed: 3632603241130
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.596-0400 setting random seed: 2405255320481
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.596-0400 m30998| 2015-07-09T14:14:38.590-0400 I SHARDING [conn318] ChunkManager: time to load chunks for db51.coll51: 0ms sequenceNumber: 62 version: 2|5||559eba0dca4787b9985d1e14 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.596-0400 setting random seed: 5417108745314
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.626-0400 setting random seed: 7145596952177
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.627-0400 setting random seed: 4776286445558
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.627-0400 setting random seed: 6974096940830
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.628-0400 setting random seed: 3162702112458
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.628-0400 setting random seed: 5346724889241
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.628-0400 setting random seed: 8610325171612
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:38.633-0400 setting random seed: 828548646531
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.174-0400 m30999| 2015-07-09T14:14:39.173-0400 I NETWORK [conn320] end connection 127.0.0.1:63732 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.181-0400 m30999| 2015-07-09T14:14:39.180-0400 I NETWORK [conn318] end connection 127.0.0.1:63728 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.192-0400 m30999| 2015-07-09T14:14:39.192-0400 I NETWORK [conn317] end connection 127.0.0.1:63726 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.205-0400 m30999| 2015-07-09T14:14:39.204-0400 I NETWORK [conn324] end connection 127.0.0.1:63737 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.220-0400 m30999| 2015-07-09T14:14:39.219-0400 I NETWORK [conn319] end connection 127.0.0.1:63731 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.233-0400 m30998| 2015-07-09T14:14:39.233-0400 I NETWORK [conn321] end connection 127.0.0.1:63733 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.279-0400 m30998| 2015-07-09T14:14:39.279-0400 I NETWORK [conn325] end connection 127.0.0.1:63743 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.292-0400 m30999| 2015-07-09T14:14:39.292-0400 I NETWORK [conn321] end connection 127.0.0.1:63734 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.310-0400 m30999| 2015-07-09T14:14:39.310-0400 I NETWORK [conn323] end connection 127.0.0.1:63736 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.324-0400 m30998| 2015-07-09T14:14:39.324-0400 I NETWORK [conn322] end connection 127.0.0.1:63738 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.335-0400 m30998| 2015-07-09T14:14:39.335-0400 I NETWORK [conn317] end connection 127.0.0.1:63725 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.342-0400 m30998| 2015-07-09T14:14:39.342-0400 I NETWORK [conn319] end connection 127.0.0.1:63729 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.364-0400 m30998| 2015-07-09T14:14:39.364-0400 I NETWORK [conn318] end connection 127.0.0.1:63727 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.370-0400 m30998| 2015-07-09T14:14:39.370-0400 I NETWORK [conn323] end connection 127.0.0.1:63739 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.377-0400 m30998| 2015-07-09T14:14:39.377-0400 I NETWORK [conn326] end connection 127.0.0.1:63744 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.388-0400 m30999| 2015-07-09T14:14:39.387-0400 I NETWORK [conn325] end connection 127.0.0.1:63740 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.408-0400 m30998| 2015-07-09T14:14:39.408-0400 I NETWORK [conn320] end connection 127.0.0.1:63730 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.423-0400 m30999| 2015-07-09T14:14:39.423-0400 I NETWORK [conn326] end connection 127.0.0.1:63742 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.426-0400 m30998| 2015-07-09T14:14:39.425-0400 I NETWORK [conn324] end connection 127.0.0.1:63741 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.448-0400 m30999| 2015-07-09T14:14:39.448-0400 I NETWORK [conn322] end connection 127.0.0.1:63735 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.468-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.468-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.469-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.469-0400 jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js: Workload completed in 1224 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.469-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.469-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.469-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.469-0400 m30999| 2015-07-09T14:14:39.469-0400 I COMMAND [conn1] DROP: db51.coll51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.469-0400 m30999| 2015-07-09T14:14:39.469-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:39.469-0400-559eba0fca4787b9985d1e16", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465679469), what: "dropCollection.start", ns: "db51.coll51", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.527-0400 m30999| 2015-07-09T14:14:39.526-0400 I SHARDING [conn1] distributed lock 'db51.coll51/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0fca4787b9985d1e17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.528-0400 m31100| 2015-07-09T14:14:39.527-0400 I COMMAND [conn40] CMD: drop db51.coll51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.530-0400 m31200| 2015-07-09T14:14:39.530-0400 I COMMAND [conn18] CMD: drop db51.coll51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.532-0400 m31101| 2015-07-09T14:14:39.532-0400 I COMMAND [repl writer worker 11] CMD: drop db51.coll51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.532-0400 m31102| 2015-07-09T14:14:39.532-0400 I COMMAND [repl writer worker 5] CMD: drop db51.coll51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.534-0400 m31201| 2015-07-09T14:14:39.534-0400 I COMMAND [repl writer worker 5] CMD: drop db51.coll51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.535-0400 m31202| 2015-07-09T14:14:39.534-0400 I COMMAND [repl writer worker 0] CMD: drop db51.coll51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.586-0400 m31100| 2015-07-09T14:14:39.586-0400 I SHARDING [conn40] remotely refreshing metadata for db51.coll51 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba0dca4787b9985d1e14, current metadata version is 2|3||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.588-0400 m31100| 2015-07-09T14:14:39.587-0400 W SHARDING [conn40] no chunks found when reloading db51.coll51, previous version was 0|0||559eba0dca4787b9985d1e14, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.588-0400 m31100| 2015-07-09T14:14:39.588-0400 I SHARDING [conn40] dropping metadata for db51.coll51 at shard version 2|3||559eba0dca4787b9985d1e14, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.589-0400 m31200| 2015-07-09T14:14:39.589-0400 I SHARDING [conn18] remotely refreshing metadata for db51.coll51 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba0dca4787b9985d1e14, current metadata version is 2|5||559eba0dca4787b9985d1e14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.591-0400 m31200| 2015-07-09T14:14:39.590-0400 W SHARDING [conn18] no chunks found when reloading db51.coll51, previous version was 0|0||559eba0dca4787b9985d1e14, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.591-0400 m31200| 2015-07-09T14:14:39.591-0400 I SHARDING [conn18] dropping metadata for db51.coll51 at shard version 2|5||559eba0dca4787b9985d1e14, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.592-0400 m30999| 2015-07-09T14:14:39.592-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:39.592-0400-559eba0fca4787b9985d1e18", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465679592), what: "dropCollection", ns: "db51.coll51", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.647-0400 m30999| 2015-07-09T14:14:39.647-0400 I SHARDING [conn1] distributed lock 'db51.coll51/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.704-0400 m30999| 2015-07-09T14:14:39.703-0400 I COMMAND [conn1] DROP DATABASE: db51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.704-0400 m30999| 2015-07-09T14:14:39.703-0400 I SHARDING [conn1] DBConfig::dropDatabase: db51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.704-0400 m30999| 2015-07-09T14:14:39.703-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:39.703-0400-559eba0fca4787b9985d1e19", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465679703), what: "dropDatabase.start", ns: "db51", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.811-0400 m30999| 2015-07-09T14:14:39.810-0400 I SHARDING [conn1] DBConfig::dropDatabase: db51 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.811-0400 m31100| 2015-07-09T14:14:39.811-0400 I COMMAND [conn160] dropDatabase db51 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.811-0400 m31100| 2015-07-09T14:14:39.811-0400 I COMMAND [conn160] dropDatabase db51 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.812-0400 m30999| 2015-07-09T14:14:39.812-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:39.812-0400-559eba0fca4787b9985d1e1a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465679812), what: "dropDatabase", ns: "db51", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.812-0400 m31102| 2015-07-09T14:14:39.812-0400 I COMMAND [repl writer worker 9] dropDatabase db51 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.812-0400 m31101| 2015-07-09T14:14:39.812-0400 I COMMAND [repl writer worker 1] dropDatabase db51 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.813-0400 m31102| 2015-07-09T14:14:39.812-0400 I COMMAND [repl writer worker 9] dropDatabase db51 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.813-0400 m31101| 2015-07-09T14:14:39.812-0400 I COMMAND [repl writer worker 1] dropDatabase db51 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.902-0400 m31100| 2015-07-09T14:14:39.902-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.903-0400 m31101| 2015-07-09T14:14:39.903-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.906-0400 m31102| 2015-07-09T14:14:39.905-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.939-0400 m31200| 2015-07-09T14:14:39.939-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.942-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.942-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.942-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.942-0400 jstests/concurrency/fsm_workloads/reindex.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.942-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.942-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.943-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.943-0400 m31201| 2015-07-09T14:14:39.942-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
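The block just logged is the harness's between-workload cleanup for indexed_insert_2dsphere.js: drop the sharded collection through mongos, drop its database, then drop the test.fsm_teardown marker collection on both shards (each replica-set secondary replays the drops). In shell terms it amounts to roughly the following; a minimal sketch, where drop() and dropDatabase() are real shell helpers and the host/port is again an assumption:

    // Rough shell equivalent of the cleanup logged above.
    var mongos = new Mongo("bs-osx108-8:30999"); // assumed mongos address
    mongos.getDB("db51").getCollection("coll51").drop(); // "DROP: db51.coll51"
    mongos.getDB("db51").dropDatabase();                 // "DROP DATABASE: db51"
    mongos.getDB("test").getCollection("fsm_teardown").drop();
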
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.943-0400 m31202| 2015-07-09T14:14:39.943-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.950-0400 m30999| 2015-07-09T14:14:39.950-0400 I SHARDING [conn1] distributed lock 'db52/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba0fca4787b9985d1e1b
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.954-0400 m30999| 2015-07-09T14:14:39.954-0400 I SHARDING [conn1] Placing [db52] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:39.954-0400 m30999| 2015-07-09T14:14:39.954-0400 I SHARDING [conn1] Enabling sharding for database [db52] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.008-0400 m30999| 2015-07-09T14:14:40.008-0400 I SHARDING [conn1] distributed lock 'db52/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.029-0400 m31100| 2015-07-09T14:14:40.028-0400 I INDEX [conn69] build index on: db52.coll52 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db52.coll52" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.030-0400 m31100| 2015-07-09T14:14:40.028-0400 I INDEX [conn69] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.044-0400 m31100| 2015-07-09T14:14:40.043-0400 I INDEX [conn69] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.046-0400 m30999| 2015-07-09T14:14:40.045-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db52.coll52", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.049-0400 m30999| 2015-07-09T14:14:40.048-0400 I SHARDING [conn1] distributed lock 'db52.coll52/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba10ca4787b9985d1e1c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.050-0400 m30999| 2015-07-09T14:14:40.050-0400 I SHARDING [conn1] enable sharding on: db52.coll52 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.051-0400 m30999| 2015-07-09T14:14:40.050-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.050-0400-559eba10ca4787b9985d1e1d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465680050), what: "shardCollection.start", ns: "db52.coll52", details: { shardKey: { _id: "hashed" }, collection: "db52.coll52", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.052-0400 m31101| 2015-07-09T14:14:40.051-0400 I INDEX [repl writer worker 2] build index on: db52.coll52 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db52.coll52" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.052-0400 m31101| 2015-07-09T14:14:40.052-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.057-0400 m31102| 2015-07-09T14:14:40.056-0400 I INDEX [repl writer worker 6] build index on: db52.coll52 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db52.coll52" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.057-0400 m31102| 2015-07-09T14:14:40.057-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.063-0400 m31101| 2015-07-09T14:14:40.062-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.064-0400 m31102| 2015-07-09T14:14:40.063-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.104-0400 m30999| 2015-07-09T14:14:40.103-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db52.coll52 using new epoch 559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.210-0400 m30999| 2015-07-09T14:14:40.210-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db52.coll52: 0ms sequenceNumber: 230 version: 1|1||559eba10ca4787b9985d1e1e based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.266-0400 m30999| 2015-07-09T14:14:40.266-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db52.coll52: 0ms sequenceNumber: 231 version: 1|1||559eba10ca4787b9985d1e1e based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.268-0400 m31100| 2015-07-09T14:14:40.267-0400 I SHARDING [conn56] remotely refreshing metadata for db52.coll52 with requested shard version 1|1||559eba10ca4787b9985d1e1e, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.269-0400 m31100| 2015-07-09T14:14:40.269-0400 I SHARDING [conn56] collection db52.coll52 was previously unsharded, new metadata loaded with shard version 1|1||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.269-0400 m31100| 2015-07-09T14:14:40.269-0400 I SHARDING [conn56] collection version was loaded at version 1|1||559eba10ca4787b9985d1e1e, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.270-0400 m30999| 2015-07-09T14:14:40.269-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.269-0400-559eba10ca4787b9985d1e1f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465680269), what: "shardCollection", ns: "db52.coll52", details: { version: "1|1||559eba10ca4787b9985d1e1e" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.325-0400 m30999| 2015-07-09T14:14:40.325-0400 I SHARDING [conn1] distributed lock 'db52.coll52/bs-osx108-8:30999:1436464534:16807' unlocked.
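db52.coll52 is now sharded the same way db51.coll51 was, and the log continues with the identical balancing step: mongos moves the upper hashed-key chunk ({ _id: 0 } through MaxKey) from test-rs0 to test-rs1, after which each shard splits its remaining chunk in half. Issued by hand, the move would look roughly like this; the bounds mirror the logged moveChunk request, _waitForDelete mirrors its waitForDelete: true, and the host/port is an assumption:

    // Sketch of the chunk relocation recorded next in the log. The admin
    // moveChunk command takes bounds rather than find here because the
    // shard key is hashed and the chunk is addressed by boundary values.
    var admin = new Mongo("bs-osx108-8:30999").getDB("admin");
    assert.commandWorked(admin.runCommand({
        moveChunk: "db52.coll52",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
        to: "test-rs1",
        _waitForDelete: true
    }));
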
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.326-0400 m30999| 2015-07-09T14:14:40.326-0400 I SHARDING [conn1] moving chunk ns: db52.coll52 moving ( ns: db52.coll52, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.327-0400 m31100| 2015-07-09T14:14:40.326-0400 I SHARDING [conn40] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.328-0400 m31100| 2015-07-09T14:14:40.327-0400 I SHARDING [conn40] received moveChunk request: { moveChunk: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba10ca4787b9985d1e1e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.331-0400 m31100| 2015-07-09T14:14:40.331-0400 I SHARDING [conn40] distributed lock 'db52.coll52/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba10792e00bb67274a19
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.331-0400 m31100| 2015-07-09T14:14:40.331-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.331-0400-559eba10792e00bb67274a1a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465680331), what: "moveChunk.start", ns: "db52.coll52", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.385-0400 m31100| 2015-07-09T14:14:40.384-0400 I SHARDING [conn40] remotely refreshing metadata for db52.coll52 based on current shard version 1|1||559eba10ca4787b9985d1e1e, current metadata version is 1|1||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.386-0400 m31100| 2015-07-09T14:14:40.386-0400 I SHARDING [conn40] metadata of collection db52.coll52 already up to date (shard version : 1|1||559eba10ca4787b9985d1e1e, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.387-0400 m31100| 2015-07-09T14:14:40.386-0400 I SHARDING [conn40] moveChunk request accepted at version 1|1||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.387-0400 m31100| 2015-07-09T14:14:40.386-0400 I SHARDING [conn40] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.387-0400 m31200| 2015-07-09T14:14:40.387-0400 I SHARDING [conn16] remotely refreshing metadata for db52.coll52, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.389-0400 m31200| 2015-07-09T14:14:40.388-0400 I SHARDING [conn16] collection db52.coll52 was previously unsharded, new metadata loaded with shard version 0|0||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.389-0400 m31200| 2015-07-09T14:14:40.389-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba10ca4787b9985d1e1e, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.390-0400 m31200| 2015-07-09T14:14:40.389-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db52.coll52 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.392-0400 m31100| 2015-07-09T14:14:40.391-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.395-0400 m31100| 2015-07-09T14:14:40.395-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.400-0400 m31100| 2015-07-09T14:14:40.400-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.402-0400 m31200| 2015-07-09T14:14:40.401-0400 I INDEX [migrateThread] build index on: db52.coll52 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.coll52" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.402-0400 m31200| 2015-07-09T14:14:40.401-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.408-0400 m31200| 2015-07-09T14:14:40.408-0400 I INDEX [migrateThread] build index on: db52.coll52 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db52.coll52" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.409-0400 m31200| 2015-07-09T14:14:40.408-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.410-0400 m31100| 2015-07-09T14:14:40.409-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.422-0400 m31200| 2015-07-09T14:14:40.421-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.422-0400 m31200| 2015-07-09T14:14:40.422-0400 I SHARDING [migrateThread] Deleter starting delete for: db52.coll52 from { _id: 0 } -> { _id: MaxKey }, with opId: 88130
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.423-0400 m31200| 2015-07-09T14:14:40.422-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db52.coll52 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.427-0400 m31100| 2015-07-09T14:14:40.426-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.430-0400 m31202| 2015-07-09T14:14:40.430-0400 I INDEX [repl writer worker 10] build index on: db52.coll52 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db52.coll52" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.430-0400 m31202| 2015-07-09T14:14:40.430-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.435-0400 m31201| 2015-07-09T14:14:40.435-0400 I INDEX [repl writer worker 7] build index on: db52.coll52 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db52.coll52" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.435-0400 m31201| 2015-07-09T14:14:40.435-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.439-0400 m31202| 2015-07-09T14:14:40.439-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.442-0400 m31200| 2015-07-09T14:14:40.441-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.442-0400 m31200| 2015-07-09T14:14:40.441-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db52.coll52' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.443-0400 m31201| 2015-07-09T14:14:40.442-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.461-0400 m31100| 2015-07-09T14:14:40.460-0400 I SHARDING [conn40] moveChunk data transfer progress: { active: true, ns: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.461-0400 m31100| 2015-07-09T14:14:40.460-0400 I SHARDING [conn40] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.462-0400 m31100| 2015-07-09T14:14:40.461-0400 I SHARDING [conn40] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.462-0400 m31100| 2015-07-09T14:14:40.461-0400 I SHARDING [conn40] moveChunk setting version to: 2|0||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.467-0400 m31200| 2015-07-09T14:14:40.466-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db52.coll52' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.467-0400 m31200| 2015-07-09T14:14:40.466-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.466-0400-559eba10d5a107a5b9c0db52", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465680466), what: "moveChunk.to", ns: "db52.coll52", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 32, step 2 of 5: 18, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.520-0400 m31100| 2015-07-09T14:14:40.520-0400 I SHARDING [conn40] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.521-0400 m31100| 2015-07-09T14:14:40.520-0400 I SHARDING [conn40] moveChunk updating self version to: 2|1||559eba10ca4787b9985d1e1e through { _id: MinKey } -> { _id: 0 } for collection 'db52.coll52'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.522-0400 m31100| 2015-07-09T14:14:40.521-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.521-0400-559eba10792e00bb67274a1b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465680521), what: "moveChunk.commit", ns: "db52.coll52", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.576-0400 m31100| 2015-07-09T14:14:40.575-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.576-0400 m31100| 2015-07-09T14:14:40.575-0400 I SHARDING [conn40] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.576-0400 m31100| 2015-07-09T14:14:40.575-0400 I SHARDING [conn40] Deleter starting delete for: db52.coll52 from { _id: 0 } -> { _id: MaxKey }, with opId: 132309 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:40.577-0400 m31100| 2015-07-09T14:14:40.576-0400 I SHARDING [conn40] rangeDeleter deleted 0 documents for db52.coll52 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.577-0400 m31100| 2015-07-09T14:14:40.576-0400 I SHARDING [conn40] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.577-0400 m31100| 2015-07-09T14:14:40.577-0400 I SHARDING [conn40] distributed lock 'db52.coll52/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.578-0400 m31100| 2015-07-09T14:14:40.577-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.577-0400-559eba10792e00bb67274a1c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465680577), what: "moveChunk.from", ns: "db52.coll52", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.633-0400 m31100| 2015-07-09T14:14:40.632-0400 I COMMAND [conn40] command db52.coll52 command: moveChunk { moveChunk: "db52.coll52", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba10ca4787b9985d1e1e') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 305ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.634-0400 m30999| 2015-07-09T14:14:40.634-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db52.coll52: 0ms sequenceNumber: 232 version: 2|1||559eba10ca4787b9985d1e1e based on: 1|1||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.636-0400 m31100| 2015-07-09T14:14:40.635-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db52.coll52", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba10ca4787b9985d1e1e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.639-0400 m31100| 2015-07-09T14:14:40.639-0400 I SHARDING [conn40] distributed lock 'db52.coll52/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba10792e00bb67274a1d [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.639-0400 m31100| 2015-07-09T14:14:40.639-0400 I SHARDING [conn40] remotely refreshing metadata for db52.coll52 based on current shard version 2|0||559eba10ca4787b9985d1e1e, current metadata version is 2|0||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.641-0400 m31100| 2015-07-09T14:14:40.640-0400 I SHARDING [conn40] updating metadata for db52.coll52 from shard version 2|0||559eba10ca4787b9985d1e1e to shard version 2|1||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.641-0400 m31100| 2015-07-09T14:14:40.640-0400 I 
SHARDING [conn40] collection version was loaded at version 2|1||559eba10ca4787b9985d1e1e, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.641-0400 m31100| 2015-07-09T14:14:40.640-0400 I SHARDING [conn40] splitChunk accepted at version 2|1||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.643-0400 m31100| 2015-07-09T14:14:40.642-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.642-0400-559eba10792e00bb67274a1e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465680642), what: "split", ns: "db52.coll52", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba10ca4787b9985d1e1e') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba10ca4787b9985d1e1e') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.696-0400 m31100| 2015-07-09T14:14:40.696-0400 I SHARDING [conn40] distributed lock 'db52.coll52/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.699-0400 m30999| 2015-07-09T14:14:40.698-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db52.coll52: 0ms sequenceNumber: 233 version: 2|3||559eba10ca4787b9985d1e1e based on: 2|1||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.699-0400 m31200| 2015-07-09T14:14:40.699-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db52.coll52", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba10ca4787b9985d1e1e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.703-0400 m31200| 2015-07-09T14:14:40.703-0400 I SHARDING [conn18] distributed lock 'db52.coll52/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba10d5a107a5b9c0db53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.704-0400 m31200| 2015-07-09T14:14:40.703-0400 I SHARDING [conn18] remotely refreshing metadata for db52.coll52 based on current shard version 0|0||559eba10ca4787b9985d1e1e, current metadata version is 1|1||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.705-0400 m31200| 2015-07-09T14:14:40.704-0400 I SHARDING [conn18] updating metadata for db52.coll52 from shard version 0|0||559eba10ca4787b9985d1e1e to shard version 2|0||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.705-0400 m31200| 2015-07-09T14:14:40.705-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba10ca4787b9985d1e1e, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.705-0400 m31200| 2015-07-09T14:14:40.705-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba10ca4787b9985d1e1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.707-0400 m31200| 2015-07-09T14:14:40.706-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:40.706-0400-559eba10d5a107a5b9c0db54", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465680706), what: "split", ns: "db52.coll52", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eba10ca4787b9985d1e1e') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba10ca4787b9985d1e1e') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.761-0400 m31200| 2015-07-09T14:14:40.761-0400 I SHARDING [conn18] distributed lock 'db52.coll52/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.763-0400 m30999| 2015-07-09T14:14:40.763-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db52.coll52: 0ms sequenceNumber: 234 version: 2|5||559eba10ca4787b9985d1e1e based on: 2|3||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.764-0400 Using 15 threads (requested 15)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.866-0400 m30999| 2015-07-09T14:14:40.865-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63745 #327 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.908-0400 m30999| 2015-07-09T14:14:40.902-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63746 #328 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.918-0400 m30998| 2015-07-09T14:14:40.918-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63747 #327 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.930-0400 m30999| 2015-07-09T14:14:40.930-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63748 #329 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.938-0400 m30998| 2015-07-09T14:14:40.938-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63750 #328 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.942-0400 m30999| 2015-07-09T14:14:40.940-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63749 #330 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.948-0400 m30998| 2015-07-09T14:14:40.948-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63752 #329 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.948-0400 m30999| 2015-07-09T14:14:40.948-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63751 #331 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.953-0400 m30999| 2015-07-09T14:14:40.952-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63754 #332 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.955-0400 m30998| 2015-07-09T14:14:40.955-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63753 #330 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.962-0400 m30999| 2015-07-09T14:14:40.962-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63755 #333 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.963-0400 m30998| 2015-07-09T14:14:40.962-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63757 #331 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.963-0400 m30998| 2015-07-09T14:14:40.962-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63758 #332 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.963-0400 m30999| 2015-07-09T14:14:40.963-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63756 #334 (9 connections now open)
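For context, the moveChunk/splitChunk traffic above is the usual post-shardCollection layout for a hashed shard key: one of the two initial chunks of db52.coll52 is moved from test-rs0 to test-rs1, then each shard splits its chunk at the midpoint of its half of the hashed-key range. A minimal mongo-shell sketch of equivalent admin commands, assuming a connection to a mongos such as m30999 (the harness drives this through its own ShardingTest helpers, not these literal calls):

    // Shard db52.coll52 on a hashed _id, as the harness does.
    var admin = db.getSiblingDB("admin");
    admin.runCommand({ enableSharding: "db52" });
    admin.runCommand({ shardCollection: "db52.coll52", key: { _id: "hashed" } });

    // Move the upper chunk { _id: 0 } -> { _id: MaxKey } to the second shard,
    // waiting for the range deleter as in the logged "waitForDelete: true" request.
    admin.runCommand({ moveChunk: "db52.coll52", find: { _id: 0 }, to: "test-rs1", _waitForDelete: true });

    // Split each remaining chunk at the midpoint of its hashed-key range,
    // matching the logged splitKeys of +/-4611686018427387902.
    admin.runCommand({ split: "db52.coll52", middle: { _id: NumberLong("-4611686018427387902") } });
    admin.runCommand({ split: "db52.coll52", middle: { _id: NumberLong("4611686018427387902") } });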
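The WRITE and INDEX entries that follow come from the reindex FSM workload: each of the 15 threads fills its own db52.reindex_<tid> collection with 1000 documents, builds text, 2dsphere, and integer indexes, and then runs reIndex. A rough sketch of one thread's behavior (hypothetical helper, not the workload's actual code; the document shape matches the logged inserts, but the exact value distribution is an assumption inferred from the logged integer/coordinate pairs):

    function runReindexThread(db, tid) {
        var coll = db.getCollection("reindex_" + tid);
        // Insert 1000 documents shaped like the logged writes
        // (insert { ..., documents: 1000, ordered: false }).
        var bulk = coll.initializeUnorderedBulkOp();
        for (var i = 0; i < 1000; ++i) {
            var integer = i % 100;            // assumed distribution
            var coord = (integer % 50) - 25;  // coordinates track integer in the log
            bulk.insert({
                text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.",
                geo: { type: "Point", coordinates: [ coord, coord ] },
                integer: integer
            });
        }
        bulk.execute();
        // The three index builds logged as text_text, geo_2dsphere, and integer_1.
        coll.createIndex({ text: "text" });
        coll.createIndex({ geo: "2dsphere" });
        coll.createIndex({ integer: 1 });
        // The "CMD: reIndex db52.reindex_<tid>" entries: rebuild all indexes,
        // including _id_, taking a database-exclusive (W) lock while it runs.
        coll.reIndex();
    }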
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.968-0400 m30998| 2015-07-09T14:14:40.968-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63759 #333 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.975-0400 setting random seed: 4882630738429
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.975-0400 setting random seed: 3181108026765
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.975-0400 setting random seed: 4255240932106
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.975-0400 setting random seed: 2411633916199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.975-0400 setting random seed: 1694162120111
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.993-0400 setting random seed: 2374859000556
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.993-0400 setting random seed: 1862420071847
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.996-0400 setting random seed: 5878097284585
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.998-0400 setting random seed: 7834379221312
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:40.998-0400 setting random seed: 3782765967771
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.007-0400 setting random seed: 1992163038812
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.007-0400 setting random seed: 1292437207885
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.007-0400 setting random seed: 2284208913333
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.014-0400 setting random seed: 2331248433329
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.029-0400 setting random seed: 699224276468
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.107-0400 m30998| 2015-07-09T14:14:41.105-0400 I SHARDING [conn328] ChunkManager: time to load chunks for db52.coll52: 0ms sequenceNumber: 63 version: 2|5||559eba10ca4787b9985d1e1e based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.242-0400 m31100| 2015-07-09T14:14:41.241-0400 I WRITE [conn25] insert db52.reindex_3 query: { _id: ObjectId('559eba11eac5440bf8d34f41'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 25552, W: 68071 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.259-0400 m31100| 2015-07-09T14:14:41.258-0400 I WRITE [conn30] insert db52.reindex_0 query: { _id: ObjectId('559eba11eac5440bf8d356d0'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 16560, W: 86581 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.276-0400
m31100| 2015-07-09T14:14:41.274-0400 I WRITE [conn23] insert db52.reindex_8 query: { _id: ObjectId('559eba11eac5440bf8d34b01'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 15563, W: 103732 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.306-0400 m31100| 2015-07-09T14:14:41.304-0400 I WRITE [conn22] insert db52.reindex_12 query: { _id: ObjectId('559eba11eac5440bf8d35c92'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 11716, W: 119091 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.306-0400 m31100| 2015-07-09T14:14:41.305-0400 I WRITE [conn146] insert db52.reindex_13 query: { _id: ObjectId('559eba11eac5440bf8d34366'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 0.0, 0.0 ] }, integer: 25.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 31, w: 31 } }, Database: { acquireCount: { w: 30, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 136290, W: 21248 } }, Collection: { acquireCount: { w: 3, W: 1 } }, Metadata: { acquireCount: { w: 27 } }, oplog: { acquireCount: { w: 27 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.307-0400 m31100| 2015-07-09T14:14:41.305-0400 I WRITE [conn69] insert db52.reindex_6 query: { _id: ObjectId('559eba11eac5440bf8d33b81'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -21.0, -21.0 ] }, integer: 4.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 11, w: 11 } }, Database: { acquireCount: { w: 10, W: 1 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 167669 } }, Collection: { acquireCount: { w: 4, W: 1 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.307-0400 m31100| 2015-07-09T14:14:41.305-0400 I WRITE [conn144] insert db52.reindex_4 query: { _id: ObjectId('559eba11eac5440bf8d33f66'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 141254, W: 14632 
} }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.308-0400 m31100| 2015-07-09T14:14:41.306-0400 I WRITE [conn67] insert db52.reindex_1 query: { _id: ObjectId('559eba11eac5440bf8d34736'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -24.0, -24.0 ] }, integer: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 108354, W: 28248 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.347-0400 m31100| 2015-07-09T14:14:41.346-0400 I WRITE [conn68] insert db52.reindex_2 query: { _id: ObjectId('559eba11eac5440bf8d3622f'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 139464, W: 17536 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 181ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.368-0400 m31100| 2015-07-09T14:14:41.366-0400 I WRITE [conn24] insert db52.reindex_7 query: { _id: ObjectId('559eba11eac5440bf8d34f05'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 145394, W: 41200 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 207ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.390-0400 m31100| 2015-07-09T14:14:41.389-0400 I WRITE [conn31] insert db52.reindex_9 query: { _id: ObjectId('559eba11eac5440bf8d35d25'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 127431, W: 62285 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 212ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.414-0400 m31100| 2015-07-09T14:14:41.413-0400 I WRITE [conn29] insert db52.reindex_14 query: { _id: ObjectId('559eba11eac5440bf8d35ff6'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 
keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 144947, W: 85587 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 253ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.439-0400 m31100| 2015-07-09T14:14:41.438-0400 I WRITE [conn27] insert db52.reindex_5 query: { _id: ObjectId('559eba11eac5440bf8d34f07'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 138539, W: 107496 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 271ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:41.460-0400 m31100| 2015-07-09T14:14:41.458-0400 I WRITE [conn147] insert db52.reindex_11 query: { _id: ObjectId('559eba11eac5440bf8d3722d'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 130099, W: 132238 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 282ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.789-0400 m31100| 2015-07-09T14:14:42.788-0400 I COMMAND [conn22] command db52.$cmd command: insert { insert: "reindex_12", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1068, w: 1068 } }, Database: { acquireCount: { w: 1067, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 147803, W: 119091 } }, Collection: { acquireCount: { w: 66, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 1644ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.816-0400 m31100| 2015-07-09T14:14:42.816-0400 I INDEX [conn56] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.816-0400 m31100| 2015-07-09T14:14:42.816-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.927-0400 m31100| 2015-07-09T14:14:42.926-0400 I INDEX [conn56] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.929-0400 m31100| 2015-07-09T14:14:42.929-0400 I COMMAND [conn56] command db52.$cmd command: createIndexes { createIndexes: "reindex_12", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 15441 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.930-0400 m31100| 2015-07-09T14:14:42.929-0400 I WRITE [conn16] insert db52.reindex_10 query: { _id: ObjectId('559eba11eac5440bf8d36453'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -7.0, -7.0 ] }, integer: 68.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 938, w: 938 } }, Database: { acquireCount: { w: 937, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 366606, W: 48174 } }, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 870 } }, oplog: { acquireCount: { w: 870 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.930-0400 m31100| 2015-07-09T14:14:42.929-0400 I WRITE [conn30] insert db52.reindex_0 query: { _id: ObjectId('559eba11eac5440bf8d35a6e'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 1.0, 1.0 ] }, integer: 26.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 996, w: 996 } }, Database: { acquireCount: { w: 995, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 341472, W: 86581 } }, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 928 } }, oplog: { acquireCount: { w: 928 } } } 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.931-0400 m31100| 2015-07-09T14:14:42.930-0400 I WRITE [conn146] insert db52.reindex_13 query: { _id: ObjectId('559eba11eac5440bf8d346f2'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 8.0, 8.0 ] }, integer: 33.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1004, w: 1004 } }, Database: { acquireCount: { w: 1003, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 420653, W: 21248 } }, Collection: { acquireCount: { w: 68, W: 1 } }, Metadata: { acquireCount: { w: 935 } }, oplog: { acquireCount: { w: 935 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.932-0400 m31100| 2015-07-09T14:14:42.930-0400 I WRITE [conn25] insert db52.reindex_3 query: { _id: ObjectId('559eba11eac5440bf8d3564b'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -17.0, -17.0 ] }, integer: 8.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 978, w: 978 } }, Database: { acquireCount: { w: 977, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 372158, W: 68071 } 
}, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 910 } }, oplog: { acquireCount: { w: 910 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.933-0400 m31100| 2015-07-09T14:14:42.930-0400 I WRITE [conn24] insert db52.reindex_7 query: { _id: ObjectId('559eba11eac5440bf8d36e78'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -21.0, -21.0 ] }, integer: 4.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 973, w: 973 } }, Database: { acquireCount: { w: 972, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 356909, W: 41200 } }, Collection: { acquireCount: { w: 66, W: 1 } }, Metadata: { acquireCount: { w: 906 } }, oplog: { acquireCount: { w: 906 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.933-0400 m31100| 2015-07-09T14:14:42.930-0400 I WRITE [conn27] insert db52.reindex_5 query: { _id: ObjectId('559eba11eac5440bf8d35602'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -15.0, -15.0 ] }, integer: 10.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 977, w: 977 } }, Database: { acquireCount: { w: 976, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 286363, W: 107496 } }, Collection: { acquireCount: { w: 64, W: 1 } }, Metadata: { acquireCount: { w: 912 } }, oplog: { acquireCount: { w: 912 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.934-0400 m31100| 2015-07-09T14:14:42.930-0400 I WRITE [conn147] insert db52.reindex_11 query: { _id: ObjectId('559eba11eac5440bf8d375a7'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 15.0, 15.0 ] }, integer: 90.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 957, w: 957 } }, Database: { acquireCount: { w: 956, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 262087, W: 132238 } }, Collection: { acquireCount: { w: 64, W: 1 } }, Metadata: { acquireCount: { w: 892 } }, oplog: { acquireCount: { w: 892 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.934-0400 m31100| 2015-07-09T14:14:42.930-0400 I WRITE [conn67] insert db52.reindex_1 query: { _id: ObjectId('559eba11eac5440bf8d34ac2'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -16.0, -16.0 ] }, integer: 9.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 979, w: 979 } }, Database: { acquireCount: { w: 978, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 391547, W: 28248 } }, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 911 } }, oplog: { acquireCount: { w: 911 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.934-0400 m31100| 2015-07-09T14:14:42.930-0400 I WRITE [conn68] insert db52.reindex_2 query: { _id: ObjectId('559eba11eac5440bf8d367c4'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 
-18.0, -18.0 ] }, integer: 7.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 976, w: 976 } }, Database: { acquireCount: { w: 975, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 382645, W: 17536 } }, Collection: { acquireCount: { w: 66, W: 1 } }, Metadata: { acquireCount: { w: 909 } }, oplog: { acquireCount: { w: 909 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.935-0400 m31100| 2015-07-09T14:14:42.931-0400 I WRITE [conn69] insert db52.reindex_6 query: { _id: ObjectId('559eba11eac5440bf8d33ef3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 11.0, 11.0 ] }, integer: 86.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 958, w: 958 } }, Database: { acquireCount: { w: 957, W: 1 }, acquireWaitCount: { w: 5 }, timeAcquiringMicros: { w: 441171 } }, Collection: { acquireCount: { w: 69, W: 1 } }, Metadata: { acquireCount: { w: 888 } }, oplog: { acquireCount: { w: 888 } } } 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.935-0400 m31100| 2015-07-09T14:14:42.931-0400 I WRITE [conn29] insert db52.reindex_14 query: { _id: ObjectId('559eba11eac5440bf8d36aa2'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 13.0, 13.0 ] }, integer: 88.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 956, w: 956 } }, Database: { acquireCount: { w: 955, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 313393, W: 85587 } }, Collection: { acquireCount: { w: 65, W: 1 } }, Metadata: { acquireCount: { w: 890 } }, oplog: { acquireCount: { w: 890 } } } 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.936-0400 m31100| 2015-07-09T14:14:42.931-0400 I WRITE [conn23] insert db52.reindex_8 query: { _id: ObjectId('559eba11eac5440bf8d34e8f'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 7.0, 7.0 ] }, integer: 82.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 952, w: 952 } }, Database: { acquireCount: { w: 951, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 327912, W: 103732 } }, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 884 } }, oplog: { acquireCount: { w: 884 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.937-0400 m31100| 2015-07-09T14:14:42.931-0400 I WRITE [conn144] insert db52.reindex_4 query: { _id: ObjectId('559eba11eac5440bf8d34304'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 2.0, 2.0 ] }, integer: 27.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 997, w: 997 } }, Database: { acquireCount: { w: 996, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 426982, W: 14632 } }, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 929 } }, oplog: { acquireCount: { w: 929 } } } 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.937-0400 m31100| 2015-07-09T14:14:42.932-0400 I WRITE [conn31] 
insert db52.reindex_9 query: { _id: ObjectId('559eba11eac5440bf8d371c9'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -25.0, -25.0 ] }, integer: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 968, w: 968 } }, Database: { acquireCount: { w: 967, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 329745, W: 62285 } }, Collection: { acquireCount: { w: 65, W: 1 } }, Metadata: { acquireCount: { w: 902 } }, oplog: { acquireCount: { w: 902 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.941-0400 m31101| 2015-07-09T14:14:42.940-0400 I INDEX [repl writer worker 9] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.941-0400 m31101| 2015-07-09T14:14:42.940-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.945-0400 m31102| 2015-07-09T14:14:42.945-0400 I INDEX [repl writer worker 8] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.946-0400 m31102| 2015-07-09T14:14:42.945-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.956-0400 m31100| 2015-07-09T14:14:42.955-0400 I INDEX [conn56] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.956-0400 m31100| 2015-07-09T14:14:42.955-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.973-0400 m31100| 2015-07-09T14:14:42.973-0400 I INDEX [conn56] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.998-0400 m31100| 2015-07-09T14:14:42.997-0400 I INDEX [conn56] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:42.998-0400 m31100| 2015-07-09T14:14:42.998-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.009-0400 m31100| 2015-07-09T14:14:43.007-0400 I INDEX [conn56] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.010-0400 m31100| 2015-07-09T14:14:43.010-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.019-0400 m31101| 2015-07-09T14:14:43.019-0400 I INDEX [repl writer worker 9] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.031-0400 m31102| 2015-07-09T14:14:43.030-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.035-0400 m31100| 2015-07-09T14:14:43.035-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.036-0400 m31100| 2015-07-09T14:14:43.035-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.040-0400 m31100| 2015-07-09T14:14:43.039-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.040-0400 m31100| 2015-07-09T14:14:43.039-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.042-0400 m31101| 2015-07-09T14:14:43.041-0400 I INDEX [repl writer worker 5] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.042-0400 m31101| 2015-07-09T14:14:43.041-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.046-0400 m31100| 2015-07-09T14:14:43.045-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.046-0400 m31100| 2015-07-09T14:14:43.045-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.050-0400 m31100| 2015-07-09T14:14:43.049-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.050-0400 m31100| 2015-07-09T14:14:43.049-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.065-0400 m31101| 2015-07-09T14:14:43.065-0400 I INDEX [repl writer worker 5] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.068-0400 m31102| 2015-07-09T14:14:43.067-0400 I INDEX [repl writer worker 14] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.068-0400 m31102| 2015-07-09T14:14:43.068-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.085-0400 m31102| 2015-07-09T14:14:43.084-0400 I INDEX [repl writer worker 14] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.088-0400 m31101| 2015-07-09T14:14:43.088-0400 I INDEX [repl writer worker 0] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.088-0400 m31101| 2015-07-09T14:14:43.088-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.100-0400 m31101| 2015-07-09T14:14:43.100-0400 I INDEX [repl writer worker 0] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.104-0400 m31102| 2015-07-09T14:14:43.104-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.105-0400 m31102| 2015-07-09T14:14:43.104-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.117-0400 m31102| 2015-07-09T14:14:43.117-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.185-0400 m31100| 2015-07-09T14:14:43.184-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.186-0400 m31100| 2015-07-09T14:14:43.185-0400 I COMMAND [conn40] command db52.reindex_12 command: reIndex { reIndex: "reindex_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 13947 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 175ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.187-0400 m31100| 2015-07-09T14:14:43.185-0400 I WRITE [conn29] insert db52.reindex_14 query: { _id: ObjectId('559eba11eac5440bf8d36aa5'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 16.0, 16.0 ] }, integer: 91.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 962, w: 962 } }, Database: { acquireCount: { w: 961, W: 1 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 559202, W: 85587 } }, Collection: { acquireCount: { w: 68, W: 1 } }, Metadata: { acquireCount: { w: 893 } }, oplog: { acquireCount: { w: 893 } } } 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.188-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn147] insert db52.reindex_11 query: { _id: ObjectId('559eba11eac5440bf8d375aa'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 18.0, 18.0 ] }, integer: 93.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 963, w: 963 } }, Database: { acquireCount: { w: 962, W: 1 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 504273, W: 132238 } }, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 895 } }, oplog: { acquireCount: { w: 895 } } } 175ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.188-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn144] insert db52.reindex_4 
query: { _id: ObjectId('559eba11eac5440bf8d34311'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 15.0, 15.0 ] }, integer: 40.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1014, w: 1014 } }, Database: { acquireCount: { w: 1013, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 658064, W: 14632 } }, Collection: { acquireCount: { w: 71, W: 1 } }, Metadata: { acquireCount: { w: 942 } }, oplog: { acquireCount: { w: 942 } } } 171ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.189-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn31] insert db52.reindex_9 query: { _id: ObjectId('559eba11eac5440bf8d371cc'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -22.0, -22.0 ] }, integer: 3.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 974, w: 974 } }, Database: { acquireCount: { w: 973, W: 1 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 575226, W: 62285 } }, Collection: { acquireCount: { w: 68, W: 1 } }, Metadata: { acquireCount: { w: 905 } }, oplog: { acquireCount: { w: 905 } } } 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.189-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn69] insert db52.reindex_6 query: { _id: ObjectId('559eba11eac5440bf8d33efd'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 21.0, 21.0 ] }, integer: 96.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 972, w: 972 } }, Database: { acquireCount: { w: 971, W: 1 }, acquireWaitCount: { w: 8 }, timeAcquiringMicros: { w: 674023 } }, Collection: { acquireCount: { w: 73, W: 1 } }, Metadata: { acquireCount: { w: 898 } }, oplog: { acquireCount: { w: 898 } } } 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.190-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn67] insert db52.reindex_1 query: { _id: ObjectId('559eba11eac5440bf8d34ac5'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -13.0, -13.0 ] }, integer: 12.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 985, w: 985 } }, Database: { acquireCount: { w: 984, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 635911, W: 28248 } }, Collection: { acquireCount: { w: 70, W: 1 } }, Metadata: { acquireCount: { w: 914 } }, oplog: { acquireCount: { w: 914 } } } 175ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.191-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn68] insert db52.reindex_2 query: { _id: ObjectId('559eba11eac5440bf8d367d3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -3.0, -3.0 ] }, integer: 22.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 995, w: 995 } }, Database: { acquireCount: { w: 994, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 616833, W: 17536 } }, Collection: { acquireCount: 
{ w: 70, W: 1 } }, Metadata: { acquireCount: { w: 924 } }, oplog: { acquireCount: { w: 924 } } } 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.191-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn23] insert db52.reindex_8 query: { _id: ObjectId('559eba11eac5440bf8d34ea7'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -19.0, -19.0 ] }, integer: 6.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 980, w: 980 } }, Database: { acquireCount: { w: 979, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 557270, W: 103732 } }, Collection: { acquireCount: { w: 71, W: 1 } }, Metadata: { acquireCount: { w: 908 } }, oplog: { acquireCount: { w: 908 } } } 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.192-0400 m31100| 2015-07-09T14:14:43.186-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.192-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn24] insert db52.reindex_7 query: { _id: ObjectId('559eba11eac5440bf8d36e7b'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -18.0, -18.0 ] }, integer: 7.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 979, w: 979 } }, Database: { acquireCount: { w: 978, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 604781, W: 41200 } }, Collection: { acquireCount: { w: 69, W: 1 } }, Metadata: { acquireCount: { w: 909 } }, oplog: { acquireCount: { w: 909 } } } 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.193-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn30] insert db52.reindex_0 query: { _id: ObjectId('559eba11eac5440bf8d35a89'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -22.0, -22.0 ] }, integer: 53.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1027, w: 1027 } }, Database: { acquireCount: { w: 1026, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 569205, W: 86581 } }, Collection: { acquireCount: { w: 71, W: 1 } }, Metadata: { acquireCount: { w: 955 } }, oplog: { acquireCount: { w: 955 } } } 175ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.193-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn27] insert db52.reindex_5 query: { _id: ObjectId('559eba11eac5440bf8d35608'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -12.0, -12.0 ] }, integer: 13.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 983, w: 983 } }, Database: { acquireCount: { w: 982, W: 1 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 529127, W: 107496 } }, Collection: { acquireCount: { w: 67, W: 1 } }, Metadata: { acquireCount: { w: 915 } }, oplog: { acquireCount: { w: 915 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.194-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn146] insert db52.reindex_13 query: { _id: ObjectId('559eba11eac5440bf8d3472b'), text: "Lorem ipsum dolor sit amet, 
consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 15.0, 15.0 ] }, integer: 90.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1066, w: 1066 } }, Database: { acquireCount: { w: 1065, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 635337, W: 21248 } }, Collection: { acquireCount: { w: 73, W: 1 } }, Metadata: { acquireCount: { w: 992 } }, oplog: { acquireCount: { w: 992 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.195-0400 m31100| 2015-07-09T14:14:43.186-0400 I WRITE [conn16] insert db52.reindex_10 query: { _id: ObjectId('559eba11eac5440bf8d3647c'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -16.0, -16.0 ] }, integer: 9.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 983, w: 983 } }, Database: { acquireCount: { w: 982, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 595422, W: 48174 } }, Collection: { acquireCount: { w: 71, W: 1 } }, Metadata: { acquireCount: { w: 911 } }, oplog: { acquireCount: { w: 911 } } } 175ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.196-0400 m31100| 2015-07-09T14:14:43.187-0400 I WRITE [conn25] insert db52.reindex_3 query: { _id: ObjectId('559eba11eac5440bf8d35671'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 2.0, 2.0 ] }, integer: 27.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1001, w: 1001 } }, Database: { acquireCount: { w: 1000, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 605238, W: 68071 } }, Collection: { acquireCount: { w: 71, W: 1 } }, Metadata: { acquireCount: { w: 929 } }, oplog: { acquireCount: { w: 929 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.207-0400 m31100| 2015-07-09T14:14:43.207-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.208-0400 m31100| 2015-07-09T14:14:43.207-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.213-0400 m31100| 2015-07-09T14:14:43.213-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.213-0400 m31100| 2015-07-09T14:14:43.213-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.218-0400 m31100| 2015-07-09T14:14:43.218-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.218-0400 m31100| 2015-07-09T14:14:43.218-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.222-0400 m31100| 2015-07-09T14:14:43.222-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { 
integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.222-0400 m31100| 2015-07-09T14:14:43.222-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.329-0400 m31100| 2015-07-09T14:14:43.328-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.330-0400 m31100| 2015-07-09T14:14:43.329-0400 I WRITE [conn24] insert db52.reindex_7 query: { _id: ObjectId('559eba11eac5440bf8d36e7c'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -17.0, -17.0 ] }, integer: 8.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 981, w: 981 } }, Database: { acquireCount: { w: 980, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 746677, W: 41200 } }, Collection: { acquireCount: { w: 70, W: 1 } }, Metadata: { acquireCount: { w: 910 } }, oplog: { acquireCount: { w: 910 } } } 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.331-0400 m31100| 2015-07-09T14:14:43.330-0400 I COMMAND [conn40] command db52.reindex_12 command: reIndex { reIndex: "reindex_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 12048 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.332-0400 m31100| 2015-07-09T14:14:43.330-0400 I WRITE [conn30] insert db52.reindex_0 query: { _id: ObjectId('559eba11eac5440bf8d35a8a'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -21.0, -21.0 ] }, integer: 54.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1029, w: 1029 } }, Database: { acquireCount: { w: 1028, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 711104, W: 86581 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 956 } }, oplog: { acquireCount: { w: 956 } } } 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.333-0400 m31100| 2015-07-09T14:14:43.330-0400 I WRITE [conn27] insert db52.reindex_5 query: { _id: ObjectId('559eba11eac5440bf8d3560a'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -11.0, -11.0 ] }, integer: 14.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 985, w: 985 } }, Database: { acquireCount: { w: 984, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 670881, W: 107496 } }, Collection: { acquireCount: { w: 68, W: 1 } }, Metadata: { acquireCount: { w: 916 } }, oplog: { acquireCount: { w: 916 } } } 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.334-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn146] insert db52.reindex_13 query: { _id: ObjectId('559eba11eac5440bf8d3472c'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 16.0, 16.0 ] }, integer: 
91.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1068, w: 1068 } }, Database: { acquireCount: { w: 1067, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 776963, W: 21248 } }, Collection: { acquireCount: { w: 74, W: 1 } }, Metadata: { acquireCount: { w: 993 } }, oplog: { acquireCount: { w: 993 } } } 143ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.334-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn67] insert db52.reindex_1 query: { _id: ObjectId('559eba11eac5440bf8d34ad3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 1.0, 1.0 ] }, integer: 26.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1001, w: 1001 } }, Database: { acquireCount: { w: 1000, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 767356, W: 28248 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 928 } }, oplog: { acquireCount: { w: 928 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.335-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn68] insert db52.reindex_2 query: { _id: ObjectId('559eba11eac5440bf8d367e5'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 15.0, 15.0 ] }, integer: 40.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1015, w: 1015 } }, Database: { acquireCount: { w: 1014, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 748249, W: 17536 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 942 } }, oplog: { acquireCount: { w: 942 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.336-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn29] insert db52.reindex_14 query: { _id: ObjectId('559eba11eac5440bf8d36ab0'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -23.0, -23.0 ] }, integer: 2.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 975, w: 975 } }, Database: { acquireCount: { w: 974, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 690465, W: 85587 } }, Collection: { acquireCount: { w: 70, W: 1 } }, Metadata: { acquireCount: { w: 904 } }, oplog: { acquireCount: { w: 904 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.336-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn25] insert db52.reindex_3 query: { _id: ObjectId('559eba11eac5440bf8d35673'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 3.0, 3.0 ] }, integer: 28.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1003, w: 1003 } }, Database: { acquireCount: { w: 1002, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 747284, W: 68071 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 930 } }, oplog: { acquireCount: { w: 930 } } } 143ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.337-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn31] 
insert db52.reindex_9 query: { _id: ObjectId('559eba11eac5440bf8d371db'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -7.0, -7.0 ] }, integer: 18.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 991, w: 991 } }, Database: { acquireCount: { w: 990, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 706707, W: 62285 } }, Collection: { acquireCount: { w: 70, W: 1 } }, Metadata: { acquireCount: { w: 920 } }, oplog: { acquireCount: { w: 920 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.338-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn23] insert db52.reindex_8 query: { _id: ObjectId('559eba11eac5440bf8d34eb2'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -8.0, -8.0 ] }, integer: 17.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 993, w: 993 } }, Database: { acquireCount: { w: 992, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 688516, W: 103732 } }, Collection: { acquireCount: { w: 73, W: 1 } }, Metadata: { acquireCount: { w: 919 } }, oplog: { acquireCount: { w: 919 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.339-0400 m31100| 2015-07-09T14:14:43.331-0400 I WRITE [conn144] insert db52.reindex_4 query: { _id: ObjectId('559eba11eac5440bf8d34323'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -17.0, -17.0 ] }, integer: 58.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1034, w: 1034 } }, Database: { acquireCount: { w: 1033, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 789335, W: 14632 } }, Collection: { acquireCount: { w: 73, W: 1 } }, Metadata: { acquireCount: { w: 960 } }, oplog: { acquireCount: { w: 960 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.339-0400 m31100| 2015-07-09T14:14:43.331-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.340-0400 m31100| 2015-07-09T14:14:43.332-0400 I WRITE [conn69] insert db52.reindex_6 query: { _id: ObjectId('559eba11eac5440bf8d33f12'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -8.0, -8.0 ] }, integer: 17.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 995, w: 995 } }, Database: { acquireCount: { w: 994, W: 1 }, acquireWaitCount: { w: 9 }, timeAcquiringMicros: { w: 805201 } }, Collection: { acquireCount: { w: 75, W: 1 } }, Metadata: { acquireCount: { w: 919 } }, oplog: { acquireCount: { w: 919 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.340-0400 m31100| 2015-07-09T14:14:43.332-0400 I WRITE [conn147] insert db52.reindex_11 query: { _id: ObjectId('559eba11eac5440bf8d375ba'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -16.0, -16.0 ] }, integer: 9.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { 
acquireCount: { r: 981, w: 981 } }, Database: { acquireCount: { w: 980, W: 1 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 635841, W: 132238 } }, Collection: { acquireCount: { w: 69, W: 1 } }, Metadata: { acquireCount: { w: 911 } }, oplog: { acquireCount: { w: 911 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.341-0400 m31100| 2015-07-09T14:14:43.334-0400 I WRITE [conn16] insert db52.reindex_10 query: { _id: ObjectId('559eba11eac5440bf8d3647d'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -15.0, -15.0 ] }, integer: 10.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 985, w: 985 } }, Database: { acquireCount: { w: 984, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 737173, W: 48174 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 912 } }, oplog: { acquireCount: { w: 912 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.345-0400 m31100| 2015-07-09T14:14:43.344-0400 I COMMAND [conn146] command db52.$cmd command: insert { insert: "reindex_13", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1077, w: 1077 } }, Database: { acquireCount: { w: 1076, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 776963, W: 21248 } }, Collection: { acquireCount: { w: 75, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2236ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.360-0400 m31100| 2015-07-09T14:14:43.359-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.360-0400 m31100| 2015-07-09T14:14:43.359-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.363-0400 m31100| 2015-07-09T14:14:43.362-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.363-0400 m31100| 2015-07-09T14:14:43.362-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.370-0400 m31100| 2015-07-09T14:14:43.368-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.370-0400 m31100| 2015-07-09T14:14:43.368-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.375-0400 m31100| 2015-07-09T14:14:43.374-0400 I INDEX [conn40] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.375-0400 m31100| 2015-07-09T14:14:43.374-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:43.486-0400 m31100| 2015-07-09T14:14:43.485-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.487-0400 m31100| 2015-07-09T14:14:43.486-0400 I WRITE [conn31] insert db52.reindex_9 query: { _id: ObjectId('559eba11eac5440bf8d371e6'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 4.0, 4.0 ] }, integer: 29.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1004, w: 1004 } }, Database: { acquireCount: { w: 1003, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 843792, W: 62285 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 931 } }, oplog: { acquireCount: { w: 931 } } } 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.488-0400 m31100| 2015-07-09T14:14:43.486-0400 I WRITE [conn69] insert db52.reindex_6 query: { _id: ObjectId('559eba11eac5440bf8d33f13'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -7.0, -7.0 ] }, integer: 18.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 997, w: 997 } }, Database: { acquireCount: { w: 996, W: 1 }, acquireWaitCount: { w: 10 }, timeAcquiringMicros: { w: 958862 } }, Collection: { acquireCount: { w: 76, W: 1 } }, Metadata: { acquireCount: { w: 920 } }, oplog: { acquireCount: { w: 920 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.489-0400 m31100| 2015-07-09T14:14:43.487-0400 I WRITE [conn16] insert db52.reindex_10 query: { _id: ObjectId('559eba11eac5440bf8d3647e'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -14.0, -14.0 ] }, integer: 11.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 987, w: 987 } }, Database: { acquireCount: { w: 986, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 888877, W: 48174 } }, Collection: { acquireCount: { w: 73, W: 1 } }, Metadata: { acquireCount: { w: 913 } }, oplog: { acquireCount: { w: 913 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.489-0400 m31100| 2015-07-09T14:14:43.487-0400 I WRITE [conn24] insert db52.reindex_7 query: { _id: ObjectId('559eba11eac5440bf8d36e8a'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -3.0, -3.0 ] }, integer: 22.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 997, w: 997 } }, Database: { acquireCount: { w: 996, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 883866, W: 41200 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 924 } }, oplog: { acquireCount: { w: 924 } } } 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.490-0400 m31100| 2015-07-09T14:14:43.487-0400 I WRITE [conn147] insert db52.reindex_11 query: { _id: ObjectId('559eba11eac5440bf8d375bb'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -15.0, -15.0 
] }, integer: 10.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 983, w: 983 } }, Database: { acquireCount: { w: 982, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 788055, W: 132238 } }, Collection: { acquireCount: { w: 70, W: 1 } }, Metadata: { acquireCount: { w: 912 } }, oplog: { acquireCount: { w: 912 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.491-0400 m31100| 2015-07-09T14:14:43.487-0400 I WRITE [conn23] insert db52.reindex_8 query: { _id: ObjectId('559eba11eac5440bf8d34ec3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 9.0, 9.0 ] }, integer: 34.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1012, w: 1012 } }, Database: { acquireCount: { w: 1011, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 825466, W: 103732 } }, Collection: { acquireCount: { w: 75, W: 1 } }, Metadata: { acquireCount: { w: 936 } }, oplog: { acquireCount: { w: 936 } } } 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.492-0400 m31100| 2015-07-09T14:14:43.487-0400 I WRITE [conn67] insert db52.reindex_1 query: { _id: ObjectId('559eba11eac5440bf8d34ae0'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 14.0, 14.0 ] }, integer: 39.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1016, w: 1016 } }, Database: { acquireCount: { w: 1015, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 904425, W: 28248 } }, Collection: { acquireCount: { w: 74, W: 1 } }, Metadata: { acquireCount: { w: 941 } }, oplog: { acquireCount: { w: 941 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.493-0400 m31100| 2015-07-09T14:14:43.488-0400 I COMMAND [conn40] command db52.reindex_12 command: reIndex { reIndex: "reindex_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 17261 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 156ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.493-0400 m31100| 2015-07-09T14:14:43.488-0400 I WRITE [conn68] insert db52.reindex_2 query: { _id: ObjectId('559eba11eac5440bf8d367f3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -21.0, -21.0 ] }, integer: 54.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1031, w: 1031 } }, Database: { acquireCount: { w: 1030, W: 1 }, acquireWaitCount: { w: 9, W: 1 }, timeAcquiringMicros: { w: 885334, W: 17536 } }, Collection: { acquireCount: { w: 74, W: 1 } }, Metadata: { acquireCount: { w: 956 } }, oplog: { acquireCount: { w: 956 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.495-0400 m31100| 2015-07-09T14:14:43.488-0400 I WRITE [conn30] insert db52.reindex_0 query: { _id: ObjectId('559eba11eac5440bf8d35a97'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -8.0, -8.0 ] }, integer: 
67.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1044, w: 1044 } }, Database: { acquireCount: { w: 1043, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 848363, W: 86581 } }, Collection: { acquireCount: { w: 74, W: 1 } }, Metadata: { acquireCount: { w: 969 } }, oplog: { acquireCount: { w: 969 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.496-0400 m31100| 2015-07-09T14:14:43.488-0400 I WRITE [conn25] insert db52.reindex_3 query: { _id: ObjectId('559eba11eac5440bf8d35690'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 17.0, 17.0 ] }, integer: 42.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1019, w: 1019 } }, Database: { acquireCount: { w: 1018, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 885239, W: 68071 } }, Collection: { acquireCount: { w: 74, W: 1 } }, Metadata: { acquireCount: { w: 944 } }, oplog: { acquireCount: { w: 944 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.497-0400 m31100| 2015-07-09T14:14:43.488-0400 I WRITE [conn27] insert db52.reindex_5 query: { _id: ObjectId('559eba11eac5440bf8d35615'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -1.0, -1.0 ] }, integer: 24.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 997, w: 997 } }, Database: { acquireCount: { w: 996, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 808956, W: 107496 } }, Collection: { acquireCount: { w: 70, W: 1 } }, Metadata: { acquireCount: { w: 926 } }, oplog: { acquireCount: { w: 926 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.498-0400 m31100| 2015-07-09T14:14:43.488-0400 I WRITE [conn144] insert db52.reindex_4 query: { _id: ObjectId('559eba11eac5440bf8d34331'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -3.0, -3.0 ] }, integer: 72.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1050, w: 1050 } }, Database: { acquireCount: { w: 1049, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 927827, W: 14632 } }, Collection: { acquireCount: { w: 75, W: 1 } }, Metadata: { acquireCount: { w: 974 } }, oplog: { acquireCount: { w: 974 } } } 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.499-0400 m31100| 2015-07-09T14:14:43.489-0400 I WRITE [conn29] insert db52.reindex_14 query: { _id: ObjectId('559eba11eac5440bf8d36abc'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -11.0, -11.0 ] }, integer: 14.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 989, w: 989 } }, Database: { acquireCount: { w: 988, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 828572, W: 85587 } }, Collection: { acquireCount: { w: 72, W: 1 } }, Metadata: { acquireCount: { w: 916 } }, oplog: { acquireCount: { w: 916 } } } 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.499-0400 m31100| 2015-07-09T14:14:43.495-0400 I INDEX 
[conn49] build index on: db52.reindex_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_13", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.499-0400 m31100| 2015-07-09T14:14:43.495-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.572-0400 m31100| 2015-07-09T14:14:43.571-0400 I INDEX [conn49] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.573-0400 m31100| 2015-07-09T14:14:43.572-0400 I COMMAND [conn49] command db52.$cmd command: createIndexes { createIndexes: "reindex_13", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 141363 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 223ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.579-0400 m31102| 2015-07-09T14:14:43.578-0400 I INDEX [repl writer worker 13] build index on: db52.reindex_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_13", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.579-0400 m31102| 2015-07-09T14:14:43.578-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.584-0400 m31101| 2015-07-09T14:14:43.584-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_13", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.585-0400 m31101| 2015-07-09T14:14:43.584-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.596-0400 m31100| 2015-07-09T14:14:43.596-0400 I INDEX [conn49] build index on: db52.reindex_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_13", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.597-0400 m31100| 2015-07-09T14:14:43.596-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.609-0400 m31100| 2015-07-09T14:14:43.609-0400 I INDEX [conn49] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.639-0400 m31100| 2015-07-09T14:14:43.638-0400 I INDEX [conn49] build index on: db52.reindex_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.639-0400 m31100| 2015-07-09T14:14:43.638-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.649-0400 m31100| 2015-07-09T14:14:43.649-0400 I INDEX [conn49] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.658-0400 m31100| 2015-07-09T14:14:43.657-0400 I COMMAND [conn144] command db52.$cmd command: insert { insert: "reindex_4", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1082, w: 1082 } }, Database: { acquireCount: { w: 1081, W: 1 }, acquireWaitCount: { w: 14, W: 1 }, timeAcquiringMicros: { w: 1047269, W: 14632 } }, Collection: { acquireCount: { w: 80, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2522ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.674-0400 m31100| 2015-07-09T14:14:43.673-0400 I COMMAND [conn30] command db52.$cmd command: insert { insert: "reindex_0", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1082, w: 1082 } }, Database: { acquireCount: { w: 1081, W: 1 }, acquireWaitCount: { w: 15, W: 1 }, timeAcquiringMicros: { w: 972891, W: 86581 } }, Collection: { acquireCount: { w: 80, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2535ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.681-0400 m31101| 2015-07-09T14:14:43.681-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.683-0400 m31100| 2015-07-09T14:14:43.683-0400 I INDEX [conn56] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.683-0400 m31100| 2015-07-09T14:14:43.683-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.685-0400 m31102| 2015-07-09T14:14:43.684-0400 I INDEX [repl writer worker 13] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.706-0400 m31102| 2015-07-09T14:14:43.705-0400 I INDEX [repl writer worker 12] build index on: db52.reindex_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_13", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.706-0400 m31102| 2015-07-09T14:14:43.705-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.708-0400 m31101| 2015-07-09T14:14:43.707-0400 I INDEX [repl writer worker 1] build index on: db52.reindex_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_13", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.708-0400 m31101| 2015-07-09T14:14:43.707-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.720-0400 m31102| 2015-07-09T14:14:43.718-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.724-0400 m31101| 2015-07-09T14:14:43.724-0400 I INDEX [repl writer worker 1] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.746-0400 m31102| 2015-07-09T14:14:43.746-0400 I INDEX [repl writer worker 10] build index on: db52.reindex_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.746-0400 m31102| 2015-07-09T14:14:43.746-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.747-0400 m31101| 2015-07-09T14:14:43.746-0400 I INDEX [repl writer worker 10] build index on: db52.reindex_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.747-0400 m31101| 2015-07-09T14:14:43.746-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.755-0400 m31101| 2015-07-09T14:14:43.755-0400 I INDEX [repl writer worker 10] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.759-0400 m31102| 2015-07-09T14:14:43.758-0400 I INDEX [repl writer worker 10] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.769-0400 m31100| 2015-07-09T14:14:43.768-0400 I INDEX [conn56] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.771-0400 m31100| 2015-07-09T14:14:43.770-0400 I WRITE [conn24] insert db52.reindex_7 query: { _id: ObjectId('559eba11eac5440bf8d36ea3'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 22.0, 22.0 ] }, integer: 47.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1028, w: 1028 } }, Database: { acquireCount: { w: 1027, W: 1 }, acquireWaitCount: { w: 15, W: 1 }, timeAcquiringMicros: { w: 1108351, W: 41200 } }, Collection: { acquireCount: { w: 78, W: 1 } }, Metadata: { acquireCount: { w: 949 } }, oplog: { acquireCount: { w: 949 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.772-0400 m31100| 2015-07-09T14:14:43.771-0400 I COMMAND [conn56] command db52.$cmd command: createIndexes { createIndexes: "reindex_4", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 13479 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 107ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.779-0400 m31100| 2015-07-09T14:14:43.778-0400 I INDEX [conn180] build index on: db52.reindex_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_0", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.779-0400 m31100| 2015-07-09T14:14:43.778-0400 I INDEX [conn180] building index using bulk method [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:43.783-0400 m31101| 2015-07-09T14:14:43.782-0400 I INDEX [repl writer worker 4] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.783-0400 m31101| 2015-07-09T14:14:43.783-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.788-0400 m31102| 2015-07-09T14:14:43.788-0400 I INDEX [repl writer worker 15] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.788-0400 m31102| 2015-07-09T14:14:43.788-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.853-0400 m31101| 2015-07-09T14:14:43.852-0400 I INDEX [repl writer worker 4] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.860-0400 m31102| 2015-07-09T14:14:43.860-0400 I INDEX [repl writer worker 15] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.874-0400 m31100| 2015-07-09T14:14:43.874-0400 I INDEX [conn180] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.876-0400 m31100| 2015-07-09T14:14:43.875-0400 I COMMAND [conn180] command db52.$cmd command: createIndexes { createIndexes: "reindex_0", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 95150 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.876-0400 m31100| 2015-07-09T14:14:43.875-0400 I WRITE [conn25] insert db52.reindex_3 query: { _id: ObjectId('559eba11eac5440bf8d356bb'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 4.0, 4.0 ] }, integer: 79.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1064, w: 1064 } }, Database: { acquireCount: { w: 1063, W: 1 }, acquireWaitCount: { w: 16, W: 1 }, timeAcquiringMicros: { w: 1205896, W: 68071 } }, Collection: { acquireCount: { w: 82, W: 1 } }, Metadata: { acquireCount: { w: 981 } }, oplog: { acquireCount: { w: 981 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.877-0400 m31100| 2015-07-09T14:14:43.875-0400 I WRITE [conn31] insert db52.reindex_9 query: { _id: ObjectId('559eba11eac5440bf8d37208'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -12.0, -12.0 ] }, integer: 63.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1046, w: 1046 } }, Database: { acquireCount: { w: 1045, W: 1 }, acquireWaitCount: { w: 
15, W: 1 }, timeAcquiringMicros: { w: 1164755, W: 62285 } }, Collection: { acquireCount: { w: 80, W: 1 } }, Metadata: { acquireCount: { w: 965 } }, oplog: { acquireCount: { w: 965 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.878-0400 m31100| 2015-07-09T14:14:43.875-0400 I WRITE [conn69] insert db52.reindex_6 query: { _id: ObjectId('559eba11eac5440bf8d33f38'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -20.0, -20.0 ] }, integer: 55.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1042, w: 1042 } }, Database: { acquireCount: { w: 1041, W: 1 }, acquireWaitCount: { w: 15 }, timeAcquiringMicros: { w: 1277243 } }, Collection: { acquireCount: { w: 84, W: 1 } }, Metadata: { acquireCount: { w: 957 } }, oplog: { acquireCount: { w: 957 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.878-0400 m31100| 2015-07-09T14:14:43.875-0400 I WRITE [conn27] insert db52.reindex_5 query: { _id: ObjectId('559eba11eac5440bf8d35660'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -13.0, -13.0 ] }, integer: 62.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1043, w: 1043 } }, Database: { acquireCount: { w: 1042, W: 1 }, acquireWaitCount: { w: 15, W: 1 }, timeAcquiringMicros: { w: 1128839, W: 107496 } }, Collection: { acquireCount: { w: 78, W: 1 } }, Metadata: { acquireCount: { w: 964 } }, oplog: { acquireCount: { w: 964 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.878-0400 m31100| 2015-07-09T14:14:43.875-0400 I WRITE [conn16] insert db52.reindex_10 query: { _id: ObjectId('559eba11eac5440bf8d364ae'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -16.0, -16.0 ] }, integer: 59.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1043, w: 1043 } }, Database: { acquireCount: { w: 1042, W: 1 }, acquireWaitCount: { w: 17, W: 1 }, timeAcquiringMicros: { w: 1210714, W: 48174 } }, Collection: { acquireCount: { w: 81, W: 1 } }, Metadata: { acquireCount: { w: 961 } }, oplog: { acquireCount: { w: 961 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.879-0400 m31100| 2015-07-09T14:14:43.875-0400 I WRITE [conn24] insert db52.reindex_7 query: { _id: ObjectId('559eba11eac5440bf8d36ea4'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 23.0, 23.0 ] }, integer: 48.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1030, w: 1030 } }, Database: { acquireCount: { w: 1029, W: 1 }, acquireWaitCount: { w: 16, W: 1 }, timeAcquiringMicros: { w: 1211580, W: 41200 } }, Collection: { acquireCount: { w: 79, W: 1 } }, Metadata: { acquireCount: { w: 950 } }, oplog: { acquireCount: { w: 950 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.880-0400 m31100| 2015-07-09T14:14:43.876-0400 I WRITE [conn29] insert db52.reindex_14 query: { _id: ObjectId('559eba11eac5440bf8d36bbc'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore 
et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -22.0, -22.0 ] }, integer: 53.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1036, w: 1036 } }, Database: { acquireCount: { w: 1035, W: 1 }, acquireWaitCount: { w: 15, W: 1 }, timeAcquiringMicros: { w: 1148188, W: 85587 } }, Collection: { acquireCount: { w: 80, W: 1 } }, Metadata: { acquireCount: { w: 955 } }, oplog: { acquireCount: { w: 955 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.880-0400 m31100| 2015-07-09T14:14:43.876-0400 I WRITE [conn68] insert db52.reindex_2 query: { _id: ObjectId('559eba11eac5440bf8d36816'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 14.0, 14.0 ] }, integer: 89.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1074, w: 1074 } }, Database: { acquireCount: { w: 1073, W: 1 }, acquireWaitCount: { w: 16, W: 1 }, timeAcquiringMicros: { w: 1203791, W: 17536 } }, Collection: { acquireCount: { w: 82, W: 1 } }, Metadata: { acquireCount: { w: 991 } }, oplog: { acquireCount: { w: 991 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.881-0400 m31100| 2015-07-09T14:14:43.876-0400 I WRITE [conn67] insert db52.reindex_1 query: { _id: ObjectId('559eba11eac5440bf8d34b06'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 1.0, 1.0 ] }, integer: 76.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1061, w: 1061 } }, Database: { acquireCount: { w: 1060, W: 1 }, acquireWaitCount: { w: 17, W: 1 }, timeAcquiringMicros: { w: 1227147, W: 28248 } }, Collection: { acquireCount: { w: 82, W: 1 } }, Metadata: { acquireCount: { w: 978 } }, oplog: { acquireCount: { w: 978 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.881-0400 m31100| 2015-07-09T14:14:43.876-0400 I WRITE [conn23] insert db52.reindex_8 query: { _id: ObjectId('559eba11eac5440bf8d34eda'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ -18.0, -18.0 ] }, integer: 57.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1042, w: 1042 } }, Database: { acquireCount: { w: 1041, W: 1 }, acquireWaitCount: { w: 15, W: 1 }, timeAcquiringMicros: { w: 1164578, W: 103732 } }, Collection: { acquireCount: { w: 82, W: 1 } }, Metadata: { acquireCount: { w: 959 } }, oplog: { acquireCount: { w: 959 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.882-0400 m31100| 2015-07-09T14:14:43.876-0400 I WRITE [conn147] insert db52.reindex_11 query: { _id: ObjectId('559eba11eac5440bf8d375e0'), text: "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.", geo: { type: "Point", coordinates: [ 22.0, 22.0 ] }, integer: 47.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 1028, w: 1028 } }, Database: { acquireCount: { w: 1027, W: 1 }, acquireWaitCount: { w: 14, W: 1 }, timeAcquiringMicros: { w: 1108256, W: 132238 } }, Collection: { acquireCount: { w: 78, W: 1 } }, Metadata: { acquireCount: { w: 949 } }, oplog: { acquireCount: { w: 949 } } } 105ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.882-0400 m31100| 2015-07-09T14:14:43.879-0400 I QUERY [conn138] getmore db52.reindex_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3393183202151 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 210004 } }, Collection: { acquireCount: { r: 8 } } } 222ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.887-0400 m31101| 2015-07-09T14:14:43.887-0400 I INDEX [repl writer worker 13] build index on: db52.reindex_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_0", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.887-0400 m31101| 2015-07-09T14:14:43.887-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.891-0400 m31102| 2015-07-09T14:14:43.890-0400 I INDEX [repl writer worker 2] build index on: db52.reindex_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_0", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.891-0400 m31102| 2015-07-09T14:14:43.890-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.901-0400 m31100| 2015-07-09T14:14:43.901-0400 I INDEX [conn56] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.901-0400 m31100| 2015-07-09T14:14:43.901-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.915-0400 m31100| 2015-07-09T14:14:43.915-0400 I INDEX [conn56] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.916-0400 m31100| 2015-07-09T14:14:43.916-0400 I COMMAND [conn56] command db52.$cmd command: createIndexes { createIndexes: "reindex_4", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 124593 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.920-0400 m31100| 2015-07-09T14:14:43.919-0400 I INDEX [conn180] build index on: db52.reindex_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_0", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.920-0400 m31100| 2015-07-09T14:14:43.919-0400 I INDEX [conn180] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.934-0400 m31100| 2015-07-09T14:14:43.933-0400 I INDEX [conn180] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.952-0400 m31100| 2015-07-09T14:14:43.952-0400 I COMMAND [conn68] command db52.$cmd command: insert { insert: "reindex_2", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1087, w: 1087 } }, Database: { acquireCount: { w: 1086, W: 1 }, acquireWaitCount: { w: 19, W: 1 }, timeAcquiringMicros: { w: 1247511, W: 17536 } }, Collection: { acquireCount: { w: 85, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2787ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.957-0400 m31100| 2015-07-09T14:14:43.957-0400 I COMMAND [conn25] command db52.$cmd command: insert { insert: "reindex_3", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1087, w: 1087 } }, Database: { acquireCount: { w: 1086, W: 1 }, acquireWaitCount: { w: 19, W: 1 }, timeAcquiringMicros: { w: 1246231, W: 68071 } }, Collection: { acquireCount: { w: 85, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2827ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.965-0400 m31100| 2015-07-09T14:14:43.965-0400 I INDEX [conn56] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.966-0400 m31100| 2015-07-09T14:14:43.965-0400 I INDEX [conn56] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.973-0400 m31100| 2015-07-09T14:14:43.972-0400 I INDEX [conn56] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.974-0400 m31100| 2015-07-09T14:14:43.974-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.975-0400 m31101| 2015-07-09T14:14:43.975-0400 I INDEX [repl writer worker 13] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.978-0400 m31100| 2015-07-09T14:14:43.978-0400 I INDEX [conn180] build index on: db52.reindex_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.979-0400 m31100| 2015-07-09T14:14:43.978-0400 I INDEX [conn180] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.986-0400 m31102| 2015-07-09T14:14:43.986-0400 I INDEX [repl writer worker 2] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:43.990-0400 m31100| 2015-07-09T14:14:43.990-0400 I INDEX [conn180] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.002-0400 m31100| 2015-07-09T14:14:43.997-0400 I COMMAND [conn67] command db52.$cmd command: insert { insert: "reindex_1", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1088, w: 1088 } }, Database: { acquireCount: { w: 1087, W: 1 }, acquireWaitCount: { w: 21, W: 1 }, timeAcquiringMicros: { w: 1303603, W: 28248 } }, Collection: { acquireCount: { w: 86, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2849ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.003-0400 m31100| 2015-07-09T14:14:44.001-0400 I QUERY [conn179] query db52.reindex_12 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3424480271998 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 316363 } }, Collection: { acquireCount: { r: 9 } } } 231ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.004-0400 m31101| 2015-07-09T14:14:44.002-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.004-0400 m31101| 2015-07-09T14:14:44.002-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.005-0400 m31102| 2015-07-09T14:14:44.005-0400 I INDEX [repl writer worker 7] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.006-0400 m31102| 2015-07-09T14:14:44.005-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.026-0400 m31102| 2015-07-09T14:14:44.024-0400 I INDEX [repl writer worker 7] build index done. scanned 1000 total records. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.028-0400 m31100| 2015-07-09T14:14:44.028-0400 I COMMAND [conn31] command db52.$cmd command: insert { insert: "reindex_9", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1088, w: 1088 } }, Database: { acquireCount: { w: 1087, W: 1 }, acquireWaitCount: { w: 21, W: 1 }, timeAcquiringMicros: { w: 1247397, W: 62285 } }, Collection: { acquireCount: { w: 86, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2853ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.033-0400 m31102| 2015-07-09T14:14:44.032-0400 I INDEX [repl writer worker 6] build index on: db52.reindex_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_0", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.033-0400 m31102| 2015-07-09T14:14:44.033-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.036-0400 m31101| 2015-07-09T14:14:44.035-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.037-0400 m31100| 2015-07-09T14:14:44.036-0400 I COMMAND [conn27] command db52.$cmd command: insert { insert: "reindex_5", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1087, w: 1087 } }, Database: { acquireCount: { w: 1086, W: 1 }, acquireWaitCount: { w: 22, W: 1 }, timeAcquiringMicros: { w: 1207686, W: 107496 } }, Collection: { acquireCount: { w: 85, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2870ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.041-0400 m31100| 2015-07-09T14:14:44.040-0400 I QUERY [conn49] query db52.reindex_13 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3393855222480 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 139992 } }, Collection: { acquireCount: { r: 9 } } } 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.048-0400 m31101| 2015-07-09T14:14:44.047-0400 I INDEX [repl writer worker 0] build index on: db52.reindex_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_0", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.048-0400 m31101| 2015-07-09T14:14:44.047-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.051-0400 m31100| 2015-07-09T14:14:44.050-0400 I COMMAND [conn69] command db52.$cmd command: insert { insert: "reindex_6", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1093, w: 1093 } }, Database: { acquireCount: { w: 1092, W: 1 }, acquireWaitCount: { w: 22 }, timeAcquiringMicros: { w: 1362427 } }, Collection: { acquireCount: { w: 91, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2998ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.061-0400 m31102| 2015-07-09T14:14:44.060-0400 I INDEX [repl writer worker 6] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.064-0400 m31101| 2015-07-09T14:14:44.063-0400 I INDEX [repl writer worker 0] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.066-0400 m31100| 2015-07-09T14:14:44.065-0400 I COMMAND [conn16] command db52.$cmd command: insert { insert: "reindex_10", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1091, w: 1091 } }, Database: { acquireCount: { w: 1090, W: 1 }, acquireWaitCount: { w: 25, W: 1 }, timeAcquiringMicros: { w: 1305885, W: 48174 } }, Collection: { acquireCount: { w: 89, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2911ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.067-0400 m31100| 2015-07-09T14:14:44.066-0400 I COMMAND [conn24] command db52.$cmd command: insert { insert: "reindex_7", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1089, w: 1089 } }, Database: { acquireCount: { w: 1088, W: 1 }, acquireWaitCount: { w: 24, W: 1 }, timeAcquiringMicros: { w: 1289287, W: 41200 } }, Collection: { acquireCount: { w: 87, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2908ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.072-0400 m31100| 2015-07-09T14:14:44.071-0400 I COMMAND [conn147] command db52.$cmd command: insert { insert: "reindex_11", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1088, w: 1088 } }, Database: { acquireCount: { w: 1087, W: 1 }, acquireWaitCount: { w: 22, W: 1 }, timeAcquiringMicros: { w: 1204231, W: 132238 } }, Collection: { acquireCount: { w: 86, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2898ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.086-0400 m31100| 2015-07-09T14:14:44.083-0400 I COMMAND [conn23] command db52.$cmd command: insert { insert: "reindex_8", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1093, w: 1093 } }, Database: { acquireCount: { w: 1092, W: 1 }, acquireWaitCount: { w: 24, W: 1 }, timeAcquiringMicros: { w: 1258295, W: 103732 } }, Collection: { acquireCount: { w: 91, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2943ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.091-0400 m31100| 2015-07-09T14:14:44.089-0400 I COMMAND [conn29] command db52.$cmd command: insert { insert: "reindex_14", documents: 1000, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 1091, w: 1091 } }, Database: { acquireCount: { w: 1090, W: 1 }, acquireWaitCount: { w: 24, W: 1 }, timeAcquiringMicros: { w: 1245331, W: 85587 } }, Collection: { acquireCount: { w: 89, W: 1 } }, Metadata: { acquireCount: { w: 1001 } }, oplog: { acquireCount: { w: 1001 } } } protocol:op_command 2929ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.099-0400 m31100| 2015-07-09T14:14:44.098-0400 I INDEX [conn183] build index on: db52.reindex_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_2", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.099-0400 m31100| 2015-07-09T14:14:44.098-0400 I INDEX [conn183] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.100-0400 m31102| 2015-07-09T14:14:44.099-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.100-0400 m31102| 2015-07-09T14:14:44.100-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.104-0400 m31101| 2015-07-09T14:14:44.104-0400 I INDEX [repl writer worker 1] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.104-0400 m31101| 2015-07-09T14:14:44.104-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.109-0400 m31102| 2015-07-09T14:14:44.108-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.113-0400 m31101| 2015-07-09T14:14:44.113-0400 I INDEX [repl writer worker 1] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.116-0400 m31102| 2015-07-09T14:14:44.116-0400 I INDEX [repl writer worker 13] build index on: db52.reindex_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_0" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.117-0400 m31102| 2015-07-09T14:14:44.116-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.121-0400 m31101| 2015-07-09T14:14:44.120-0400 I INDEX [repl writer worker 8] build index on: db52.reindex_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_0" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.121-0400 m31101| 2015-07-09T14:14:44.120-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.129-0400 m31101| 2015-07-09T14:14:44.129-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.129-0400 m31102| 2015-07-09T14:14:44.129-0400 I INDEX [repl writer worker 13] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.193-0400 m31100| 2015-07-09T14:14:44.193-0400 I INDEX [conn183] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.194-0400 m31100| 2015-07-09T14:14:44.194-0400 I COMMAND [conn183] command db52.$cmd command: createIndexes { createIndexes: "reindex_2", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 138687 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 238ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.203-0400 m31100| 2015-07-09T14:14:44.201-0400 I INDEX [conn185] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.204-0400 m31100| 2015-07-09T14:14:44.202-0400 I INDEX [conn185] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.205-0400 m31102| 2015-07-09T14:14:44.204-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_2", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.205-0400 m31102| 2015-07-09T14:14:44.204-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.207-0400 m31101| 2015-07-09T14:14:44.207-0400 I INDEX [repl writer worker 7] build index on: db52.reindex_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_2", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.208-0400 m31101| 2015-07-09T14:14:44.207-0400 I INDEX [repl writer worker 7] building index using bulk method
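Each createIndexes that completes on the primary (m31100) is followed shortly by matching "build index" messages from repl writer worker threads on the secondaries m31101 and m31102, which rebuild the index while applying the oplog entry. The index the log reports as text_text (stored under the reserved key { _fts: "text", _ftsx: 1 }) would be declared as in this hedged sketch (the key pattern and name come from the log; the shell call is an assumption about how the workload creates it):

    // Builds the index logged as "text_text"; that is also the
    // default name the server derives from this key pattern.
    db.reindex_2.createIndex({ text: "text" });
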
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.284-0400 m31100| 2015-07-09T14:14:44.283-0400 I INDEX [conn185] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.488-0400 m31100| 2015-07-09T14:14:44.283-0400 I COMMAND [conn185] command db52.$cmd command: createIndexes { createIndexes: "reindex_3", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 233636 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 323ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.489-0400 m31102| 2015-07-09T14:14:44.285-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.489-0400 m31102| 2015-07-09T14:14:44.293-0400 I INDEX [repl writer worker 5] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.489-0400 m31102| 2015-07-09T14:14:44.293-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.489-0400 m31100| 2015-07-09T14:14:44.297-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.489-0400 m31100| 2015-07-09T14:14:44.297-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.489-0400 m31100| 2015-07-09T14:14:44.302-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.490-0400 m31100| 2015-07-09T14:14:44.303-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.490-0400 m31100| 2015-07-09T14:14:44.308-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.490-0400 m31100| 2015-07-09T14:14:44.308-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.490-0400 m31100| 2015-07-09T14:14:44.313-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.490-0400 m31100| 2015-07-09T14:14:44.314-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.490-0400 m31101| 2015-07-09T14:14:44.337-0400 I INDEX [repl writer worker 7] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.490-0400 m31101| 2015-07-09T14:14:44.344-0400 I INDEX [repl writer worker 13] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.491-0400 m31101| 2015-07-09T14:14:44.344-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.491-0400 m31102| 2015-07-09T14:14:44.387-0400 I INDEX [repl writer worker 5] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.491-0400 m31101| 2015-07-09T14:14:44.435-0400 I INDEX [repl writer worker 13] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.491-0400 m31100| 2015-07-09T14:14:44.470-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.491-0400 m31100| 2015-07-09T14:14:44.470-0400 I COMMAND [conn40] command db52.reindex_4 command: reIndex { reIndex: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 310189 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 496ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.491-0400 m31100| 2015-07-09T14:14:44.472-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.492-0400 m31100| 2015-07-09T14:14:44.475-0400 I INDEX [conn177] build index on: db52.reindex_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_1", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.492-0400 m31100| 2015-07-09T14:14:44.475-0400 I INDEX [conn177] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.555-0400 m31100| 2015-07-09T14:14:44.554-0400 I INDEX [conn177] build index done. scanned 1000 total records. 0 secs
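The reIndex seen on conn40 drops and rebuilds every index on the collection, which is why the same four builds (_id_, text_text, geo_2dsphere, integer_1) recur for db52.reindex_4. Its completion line shows a database-exclusive lock (Database: { acquireCount: { W: 1 } }) held for the whole rebuild, which is what the long timeAcquiringMicros waits on the surrounding inserts and createIndexes are queuing behind. A hedged sketch of the call (the collection name is from the log; issuing it this way in the shell is an assumption):

    // Rebuild all indexes on reindex_4, as conn40 does repeatedly above.
    // At this server version reIndex holds the database-exclusive (W) lock,
    // so every other operation on db52 blocks until it finishes.
    db.reindex_4.reIndex();
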
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.556-0400 m31100| 2015-07-09T14:14:44.556-0400 I COMMAND [conn177] command db52.$cmd command: createIndexes { createIndexes: "reindex_1", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 466906 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 552ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.561-0400 m31100| 2015-07-09T14:14:44.560-0400 I INDEX [conn181] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.561-0400 m31100| 2015-07-09T14:14:44.560-0400 I INDEX [conn181] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.566-0400 m31101| 2015-07-09T14:14:44.565-0400 I INDEX [repl writer worker 0] build index on: db52.reindex_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_1", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.569-0400 m31101| 2015-07-09T14:14:44.566-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.569-0400 m31102| 2015-07-09T14:14:44.568-0400 I INDEX [repl writer worker 11] build index on: db52.reindex_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_1", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.570-0400 m31102| 2015-07-09T14:14:44.569-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.646-0400 m31101| 2015-07-09T14:14:44.645-0400 I INDEX [repl writer worker 0] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.652-0400 m31100| 2015-07-09T14:14:44.651-0400 I INDEX [conn181] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.653-0400 m31100| 2015-07-09T14:14:44.652-0400 I COMMAND [conn181] command db52.$cmd command: createIndexes { createIndexes: "reindex_9", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1023453 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 619ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.659-0400 m31101| 2015-07-09T14:14:44.657-0400 I INDEX [repl writer worker 4] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.659-0400 m31101| 2015-07-09T14:14:44.657-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.660-0400 m31100| 2015-07-09T14:14:44.657-0400 I INDEX [conn49] build index on: db52.reindex_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_5", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.660-0400 m31100| 2015-07-09T14:14:44.657-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.678-0400 m31102| 2015-07-09T14:14:44.677-0400 I INDEX [repl writer worker 11] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.684-0400 m31102| 2015-07-09T14:14:44.684-0400 I INDEX [repl writer worker 2] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.685-0400 m31102| 2015-07-09T14:14:44.684-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.743-0400 m31100| 2015-07-09T14:14:44.743-0400 I INDEX [conn49] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.745-0400 m31100| 2015-07-09T14:14:44.744-0400 I COMMAND [conn49] command db52.$cmd command: createIndexes { createIndexes: "reindex_5", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1107441 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 699ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.750-0400 m31100| 2015-07-09T14:14:44.750-0400 I INDEX [conn180] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.750-0400 m31100| 2015-07-09T14:14:44.750-0400 I INDEX [conn180] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.763-0400 m31101| 2015-07-09T14:14:44.763-0400 I INDEX [repl writer worker 4] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.769-0400 m31101| 2015-07-09T14:14:44.769-0400 I INDEX [repl writer worker 15] build index on: db52.reindex_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_5", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.770-0400 m31101| 2015-07-09T14:14:44.769-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.786-0400 m31102| 2015-07-09T14:14:44.786-0400 I INDEX [repl writer worker 2] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.791-0400 m31102| 2015-07-09T14:14:44.791-0400 I INDEX [repl writer worker 0] build index on: db52.reindex_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_5", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.792-0400 m31102| 2015-07-09T14:14:44.791-0400 I INDEX [repl writer worker 0] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.848-0400 m31100| 2015-07-09T14:14:44.847-0400 I INDEX [conn180] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.849-0400 m31100| 2015-07-09T14:14:44.848-0400 I COMMAND [conn180] command db52.$cmd command: createIndexes { createIndexes: "reindex_6", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1190155 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 793ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.853-0400 m31100| 2015-07-09T14:14:44.853-0400 I INDEX [conn179] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.853-0400 m31100| 2015-07-09T14:14:44.853-0400 I INDEX [conn179] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.866-0400 m31101| 2015-07-09T14:14:44.866-0400 I INDEX [repl writer worker 15] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.873-0400 m31101| 2015-07-09T14:14:44.873-0400 I INDEX [repl writer worker 9] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.873-0400 m31101| 2015-07-09T14:14:44.873-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.884-0400 m31102| 2015-07-09T14:14:44.883-0400 I INDEX [repl writer worker 0] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.889-0400 m31102| 2015-07-09T14:14:44.888-0400 I INDEX [repl writer worker 8] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.889-0400 m31102| 2015-07-09T14:14:44.889-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.943-0400 m31100| 2015-07-09T14:14:44.942-0400 I INDEX [conn179] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.945-0400 m31100| 2015-07-09T14:14:44.943-0400 I COMMAND [conn179] command db52.$cmd command: createIndexes { createIndexes: "reindex_10", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1279812 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 873ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.951-0400 m31100| 2015-07-09T14:14:44.950-0400 I INDEX [conn178] build index on: db52.reindex_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_7", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.951-0400 m31100| 2015-07-09T14:14:44.950-0400 I INDEX [conn178] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.967-0400 m31102| 2015-07-09T14:14:44.967-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.974-0400 m31102| 2015-07-09T14:14:44.973-0400 I INDEX [repl writer worker 12] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.974-0400 m31102| 2015-07-09T14:14:44.974-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.981-0400 m31101| 2015-07-09T14:14:44.981-0400 I INDEX [repl writer worker 9] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.990-0400 m31101| 2015-07-09T14:14:44.989-0400 I INDEX [repl writer worker 8] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:44.990-0400 m31101| 2015-07-09T14:14:44.989-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.052-0400 m31102| 2015-07-09T14:14:45.052-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.075-0400 m31100| 2015-07-09T14:14:45.074-0400 I INDEX [conn178] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.076-0400 m31100| 2015-07-09T14:14:45.075-0400 I COMMAND [conn178] command db52.$cmd command: createIndexes { createIndexes: "reindex_7", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1371790 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1002ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.083-0400 m31100| 2015-07-09T14:14:45.083-0400 I INDEX [conn50] build index on: db52.reindex_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_11", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.083-0400 m31100| 2015-07-09T14:14:45.083-0400 I INDEX [conn50] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.085-0400 m31102| 2015-07-09T14:14:45.085-0400 I INDEX [repl writer worker 1] build index on: db52.reindex_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_7", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.085-0400 m31102| 2015-07-09T14:14:45.085-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.102-0400 m31101| 2015-07-09T14:14:45.101-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.108-0400 m31101| 2015-07-09T14:14:45.107-0400 I INDEX [repl writer worker 14] build index on: db52.reindex_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_7", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.108-0400 m31101| 2015-07-09T14:14:45.107-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.162-0400 m31102| 2015-07-09T14:14:45.162-0400 I INDEX [repl writer worker 1] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.174-0400 m31100| 2015-07-09T14:14:45.174-0400 I INDEX [conn50] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.176-0400 m31100| 2015-07-09T14:14:45.175-0400 I COMMAND [conn50] command db52.$cmd command: createIndexes { createIndexes: "reindex_11", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1500460 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1099ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.182-0400 m31101| 2015-07-09T14:14:45.182-0400 I INDEX [repl writer worker 14] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.183-0400 m31100| 2015-07-09T14:14:45.182-0400 I INDEX [conn175] build index on: db52.reindex_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_8", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.183-0400 m31100| 2015-07-09T14:14:45.182-0400 I INDEX [conn175] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.187-0400 m31102| 2015-07-09T14:14:45.186-0400 I INDEX [repl writer worker 7] build index on: db52.reindex_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_11", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.187-0400 m31102| 2015-07-09T14:14:45.186-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.192-0400 m31101| 2015-07-09T14:14:45.192-0400 I INDEX [repl writer worker 2] build index on: db52.reindex_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_11", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.192-0400 m31101| 2015-07-09T14:14:45.192-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.266-0400 m31100| 2015-07-09T14:14:45.265-0400 I INDEX [conn175] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.268-0400 m31100| 2015-07-09T14:14:45.267-0400 I COMMAND [conn175] command db52.$cmd command: createIndexes { createIndexes: "reindex_8", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2590139 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1178ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.269-0400 m31100| 2015-07-09T14:14:45.268-0400 I COMMAND [conn182] command db52.reindex_12 command: listIndexes { listIndexes: "reindex_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 2674994 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 1174ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.271-0400 m31100| 2015-07-09T14:14:45.270-0400 I COMMAND [conn38] CMD: reIndex db52.reindex_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.278-0400 m31100| 2015-07-09T14:14:45.278-0400 I INDEX [conn45] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.278-0400 m31100| 2015-07-09T14:14:45.278-0400 I INDEX [conn45] building index using bulk method
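The listIndexes entry on conn182 is worth noting: a read-only metadata command was logged at 1174 ms, its time dominated by waiting on the database read lock (see timeAcquiringMicros) behind the concurrent reIndex. These lines appear in the log because they exceeded mongod's slow-operation threshold (slowms, 100 ms by default), not because the test enabled profiling. Hedged shell equivalents (the collection name is from the log; the calls are standard shell helpers, not the jstest's code):

    // getIndexes() issues the listIndexes command seen on conn182.
    db.reindex_12.getIndexes();
    // Profiling level 0 with a slowms value only adjusts the logging
    // threshold that decided which of the operations above were logged.
    db.setProfilingLevel(0, 100);
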
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.279-0400 m31101| 2015-07-09T14:14:45.279-0400 I INDEX [repl writer worker 2] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.287-0400 m31101| 2015-07-09T14:14:45.284-0400 I INDEX [repl writer worker 10] build index on: db52.reindex_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_8", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.287-0400 m31101| 2015-07-09T14:14:45.284-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.296-0400 m31102| 2015-07-09T14:14:45.296-0400 I INDEX [repl writer worker 7] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.301-0400 m31102| 2015-07-09T14:14:45.300-0400 I INDEX [repl writer worker 10] build index on: db52.reindex_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_8", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.301-0400 m31102| 2015-07-09T14:14:45.300-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.362-0400 m31100| 2015-07-09T14:14:45.362-0400 I INDEX [conn45] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.363-0400 m31100| 2015-07-09T14:14:45.363-0400 I COMMAND [conn45] command db52.$cmd command: createIndexes { createIndexes: "reindex_14", indexes: [ { key: { text: "text" }, name: "text_text" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2684289 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1, W: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1269ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.371-0400 m31100| 2015-07-09T14:14:45.370-0400 I INDEX [conn183] build index on: db52.reindex_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_2", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.371-0400 m31100| 2015-07-09T14:14:45.370-0400 I INDEX [conn183] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.373-0400 m31102| 2015-07-09T14:14:45.373-0400 I INDEX [repl writer worker 10] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.378-0400 m31102| 2015-07-09T14:14:45.377-0400 I INDEX [repl writer worker 15] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.378-0400 m31102| 2015-07-09T14:14:45.377-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.382-0400 m31101| 2015-07-09T14:14:45.380-0400 I INDEX [repl writer worker 10] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.388-0400 m31101| 2015-07-09T14:14:45.387-0400 I INDEX [repl writer worker 11] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.388-0400 m31101| 2015-07-09T14:14:45.387-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.390-0400 m31100| 2015-07-09T14:14:45.390-0400 I INDEX [conn183] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.393-0400 m31100| 2015-07-09T14:14:45.390-0400 I COMMAND [conn183] command db52.$cmd command: createIndexes { createIndexes: "reindex_2", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2667393 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1194ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.396-0400 m31100| 2015-07-09T14:14:45.395-0400 I INDEX [conn185] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.396-0400 m31100| 2015-07-09T14:14:45.395-0400 I INDEX [conn185] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.417-0400 m31100| 2015-07-09T14:14:45.417-0400 I INDEX [conn185] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.418-0400 m31100| 2015-07-09T14:14:45.417-0400 I COMMAND [conn185] command db52.$cmd command: createIndexes { createIndexes: "reindex_3", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2608778 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1132ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.425-0400 m31100| 2015-07-09T14:14:45.424-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.425-0400 m31100| 2015-07-09T14:14:45.424-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.429-0400 m31100| 2015-07-09T14:14:45.429-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.430-0400 m31100| 2015-07-09T14:14:45.429-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.435-0400 m31100| 2015-07-09T14:14:45.434-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.435-0400 m31100| 2015-07-09T14:14:45.434-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.440-0400 m31100| 2015-07-09T14:14:45.438-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.440-0400 m31100| 2015-07-09T14:14:45.438-0400 I INDEX [conn40] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.476-0400 m31102| 2015-07-09T14:14:45.475-0400 I INDEX [repl writer worker 15] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.480-0400 m31102| 2015-07-09T14:14:45.479-0400 I INDEX [repl writer worker 9] build index on: db52.reindex_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_2", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.481-0400 m31102| 2015-07-09T14:14:45.479-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.489-0400 m31101| 2015-07-09T14:14:45.488-0400 I INDEX [repl writer worker 11] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.493-0400 m31101| 2015-07-09T14:14:45.492-0400 I INDEX [repl writer worker 6] build index on: db52.reindex_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_2", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.493-0400 m31101| 2015-07-09T14:14:45.492-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.497-0400 m31102| 2015-07-09T14:14:45.497-0400 I INDEX [repl writer worker 9] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.503-0400 m31102| 2015-07-09T14:14:45.503-0400 I INDEX [repl writer worker 6] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.503-0400 m31102| 2015-07-09T14:14:45.503-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.513-0400 m31101| 2015-07-09T14:14:45.513-0400 I INDEX [repl writer worker 6] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.518-0400 m31101| 2015-07-09T14:14:45.517-0400 I INDEX [repl writer worker 1] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.518-0400 m31101| 2015-07-09T14:14:45.517-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.527-0400 m31102| 2015-07-09T14:14:45.527-0400 I INDEX [repl writer worker 6] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.532-0400 m31101| 2015-07-09T14:14:45.532-0400 I INDEX [repl writer worker 1] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.574-0400 m31100| 2015-07-09T14:14:45.573-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.575-0400 m31100| 2015-07-09T14:14:45.574-0400 I COMMAND [conn40] command db52.reindex_4 command: reIndex { reIndex: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1446221 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1102ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.576-0400 m31100| 2015-07-09T14:14:45.576-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.580-0400 m31100| 2015-07-09T14:14:45.579-0400 I INDEX [conn177] build index on: db52.reindex_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_1", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.580-0400 m31100| 2015-07-09T14:14:45.579-0400 I INDEX [conn177] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.593-0400 m31100| 2015-07-09T14:14:45.592-0400 I INDEX [conn177] build index done. scanned 1000 total records. 0 secs
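After the text indexes, the workload adds the geo_2dsphere indexes, each build on the primary again mirrored by the secondaries' repl writer workers. The key pattern and name match this hedged sketch (taken from the logged index properties; the shell call itself is assumed):

    // Builds the index logged as "geo_2dsphere";
    // 2dsphereIndexVersion: 2 is the server default at this version.
    db.reindex_1.createIndex({ geo: "2dsphere" });
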
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.595-0400 m31100| 2015-07-09T14:14:45.593-0400 I COMMAND [conn177] command db52.$cmd command: createIndexes { createIndexes: "reindex_1", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2519295 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 1035ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.602-0400 m31100| 2015-07-09T14:14:45.602-0400 I INDEX [conn181] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.603-0400 m31100| 2015-07-09T14:14:45.602-0400 I INDEX [conn181] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.607-0400 m31102| 2015-07-09T14:14:45.607-0400 I INDEX [repl writer worker 13] build index on: db52.reindex_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_1", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.608-0400 m31102| 2015-07-09T14:14:45.607-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.608-0400 m31101| 2015-07-09T14:14:45.607-0400 I INDEX [repl writer worker 5] build index on: db52.reindex_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_1", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.608-0400 m31101| 2015-07-09T14:14:45.607-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.623-0400 m31101| 2015-07-09T14:14:45.622-0400 I INDEX [repl writer worker 5] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.623-0400 m31100| 2015-07-09T14:14:45.623-0400 I INDEX [conn181] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.624-0400 m31100| 2015-07-09T14:14:45.623-0400 I COMMAND [conn181] command db52.$cmd command: createIndexes { createIndexes: "reindex_9", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1439767 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 969ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.632-0400 m31100| 2015-07-09T14:14:45.631-0400 I INDEX [conn49] build index on: db52.reindex_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_5", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.632-0400 m31100| 2015-07-09T14:14:45.631-0400 I INDEX [conn49] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.633-0400 m31102| 2015-07-09T14:14:45.633-0400 I INDEX [repl writer worker 13] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.635-0400 m31101| 2015-07-09T14:14:45.634-0400 I INDEX [repl writer worker 12] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.636-0400 m31101| 2015-07-09T14:14:45.635-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.641-0400 m31102| 2015-07-09T14:14:45.640-0400 I INDEX [repl writer worker 4] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.641-0400 m31102| 2015-07-09T14:14:45.640-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.650-0400 m31100| 2015-07-09T14:14:45.649-0400 I INDEX [conn49] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.651-0400 m31100| 2015-07-09T14:14:45.650-0400 I COMMAND [conn49] command db52.$cmd command: createIndexes { createIndexes: "reindex_5", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1378708 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 904ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.655-0400 m31100| 2015-07-09T14:14:45.654-0400 I INDEX [conn180] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.655-0400 m31100| 2015-07-09T14:14:45.655-0400 I INDEX [conn180] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.660-0400 m31101| 2015-07-09T14:14:45.660-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.663-0400 m31102| 2015-07-09T14:14:45.662-0400 I INDEX [repl writer worker 4] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.668-0400 m31101| 2015-07-09T14:14:45.668-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_5", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.668-0400 m31101| 2015-07-09T14:14:45.668-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.671-0400 m31102| 2015-07-09T14:14:45.671-0400 I INDEX [repl writer worker 14] build index on: db52.reindex_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_5", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.671-0400 m31102| 2015-07-09T14:14:45.671-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.675-0400 m31100| 2015-07-09T14:14:45.674-0400 I INDEX [conn180] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.676-0400 m31100| 2015-07-09T14:14:45.674-0400 I COMMAND [conn180] command db52.$cmd command: createIndexes { createIndexes: "reindex_6", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1302046 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 824ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.678-0400 m31100| 2015-07-09T14:14:45.678-0400 I INDEX [conn179] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.679-0400 m31100| 2015-07-09T14:14:45.678-0400 I INDEX [conn179] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.686-0400 m31101| 2015-07-09T14:14:45.685-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.688-0400 m31102| 2015-07-09T14:14:45.687-0400 I INDEX [repl writer worker 14] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.693-0400 m31101| 2015-07-09T14:14:45.692-0400 I INDEX [repl writer worker 7] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.693-0400 m31101| 2015-07-09T14:14:45.692-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.697-0400 m31102| 2015-07-09T14:14:45.694-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.697-0400 m31102| 2015-07-09T14:14:45.695-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.699-0400 m31100| 2015-07-09T14:14:45.699-0400 I INDEX [conn179] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.701-0400 m31100| 2015-07-09T14:14:45.700-0400 I COMMAND [conn179] command db52.$cmd command: createIndexes { createIndexes: "reindex_10", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1229343 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 754ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.708-0400 m31100| 2015-07-09T14:14:45.707-0400 I INDEX [conn178] build index on: db52.reindex_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_7", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.708-0400 m31100| 2015-07-09T14:14:45.707-0400 I INDEX [conn178] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.711-0400 m31101| 2015-07-09T14:14:45.710-0400 I INDEX [repl writer worker 7] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.715-0400 m31102| 2015-07-09T14:14:45.715-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.719-0400 m31101| 2015-07-09T14:14:45.718-0400 I INDEX [repl writer worker 13] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.720-0400 m31101| 2015-07-09T14:14:45.718-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.725-0400 m31102| 2015-07-09T14:14:45.724-0400 I INDEX [repl writer worker 5] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.726-0400 m31102| 2015-07-09T14:14:45.724-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.728-0400 m31100| 2015-07-09T14:14:45.726-0400 I INDEX [conn178] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.729-0400 m31100| 2015-07-09T14:14:45.727-0400 I COMMAND [conn178] command db52.$cmd command: createIndexes { createIndexes: "reindex_7", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1123764 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 649ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.732-0400 m31100| 2015-07-09T14:14:45.732-0400 I INDEX [conn50] build index on: db52.reindex_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_11", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.732-0400 m31100| 2015-07-09T14:14:45.732-0400 I INDEX [conn50] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.739-0400 m31102| 2015-07-09T14:14:45.739-0400 I INDEX [repl writer worker 5] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.744-0400 m31101| 2015-07-09T14:14:45.744-0400 I INDEX [repl writer worker 13] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.749-0400 m31102| 2015-07-09T14:14:45.749-0400 I INDEX [repl writer worker 11] build index on: db52.reindex_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_7", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.749-0400 m31102| 2015-07-09T14:14:45.749-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.754-0400 m31101| 2015-07-09T14:14:45.754-0400 I INDEX [repl writer worker 0] build index on: db52.reindex_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_7", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.754-0400 m31101| 2015-07-09T14:14:45.754-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.757-0400 m31100| 2015-07-09T14:14:45.757-0400 I INDEX [conn50] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.759-0400 m31100| 2015-07-09T14:14:45.758-0400 I COMMAND [conn50] command db52.$cmd command: createIndexes { createIndexes: "reindex_11", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1051903 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 581ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.772-0400 m31102| 2015-07-09T14:14:45.772-0400 I INDEX [repl writer worker 11] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.776-0400 m31100| 2015-07-09T14:14:45.775-0400 I INDEX [conn175] build index on: db52.reindex_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_8", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.776-0400 m31100| 2015-07-09T14:14:45.776-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.781-0400 m31102| 2015-07-09T14:14:45.778-0400 I INDEX [repl writer worker 2] build index on: db52.reindex_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_11", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.782-0400 m31102| 2015-07-09T14:14:45.778-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.782-0400 m31101| 2015-07-09T14:14:45.780-0400 I INDEX [repl writer worker 0] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.787-0400 m31101| 2015-07-09T14:14:45.786-0400 I INDEX [repl writer worker 4] build index on: db52.reindex_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_11", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.787-0400 m31101| 2015-07-09T14:14:45.786-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.795-0400 m31100| 2015-07-09T14:14:45.795-0400 I INDEX [conn175] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.796-0400 m31100| 2015-07-09T14:14:45.795-0400 I COMMAND [conn175] command db52.$cmd command: createIndexes { createIndexes: "reindex_8", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1004253 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 527ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.800-0400 m31102| 2015-07-09T14:14:45.799-0400 I INDEX [repl writer worker 2] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.806-0400 m31101| 2015-07-09T14:14:45.805-0400 I INDEX [repl writer worker 4] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.807-0400 m31102| 2015-07-09T14:14:45.806-0400 I INDEX [repl writer worker 0] build index on: db52.reindex_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_8", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.807-0400 m31102| 2015-07-09T14:14:45.806-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.810-0400 m31100| 2015-07-09T14:14:45.810-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.811-0400 m31100| 2015-07-09T14:14:45.810-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.813-0400 m31101| 2015-07-09T14:14:45.812-0400 I INDEX [repl writer worker 15] build index on: db52.reindex_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_8", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.813-0400 m31101| 2015-07-09T14:14:45.812-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.817-0400 m31100| 2015-07-09T14:14:45.815-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.817-0400 m31100| 2015-07-09T14:14:45.815-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.821-0400 m31100| 2015-07-09T14:14:45.820-0400 I INDEX [conn38] 
build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.821-0400 m31100| 2015-07-09T14:14:45.820-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.825-0400 m31100| 2015-07-09T14:14:45.824-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.825-0400 m31100| 2015-07-09T14:14:45.824-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.832-0400 m31102| 2015-07-09T14:14:45.831-0400 I INDEX [repl writer worker 0] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.832-0400 m31101| 2015-07-09T14:14:45.831-0400 I INDEX [repl writer worker 15] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.936-0400 m31100| 2015-07-09T14:14:45.936-0400 I INDEX [conn38] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.937-0400 m31100| 2015-07-09T14:14:45.937-0400 I COMMAND [conn38] command db52.reindex_12 command: reIndex { reIndex: "reindex_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1025603 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 666ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.938-0400 m31100| 2015-07-09T14:14:45.938-0400 I COMMAND [conn38] CMD: reIndex db52.reindex_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.942-0400 m31100| 2015-07-09T14:14:45.941-0400 I INDEX [conn45] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.942-0400 m31100| 2015-07-09T14:14:45.942-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.963-0400 m31100| 2015-07-09T14:14:45.962-0400 I INDEX [conn45] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.963-0400 m31100| 2015-07-09T14:14:45.963-0400 I COMMAND [conn45] command db52.$cmd command: createIndexes { createIndexes: "reindex_14", indexes: [ { key: { geo: "2dsphere" }, name: "geo_2dsphere" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1073103 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 597ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.970-0400 m31100| 2015-07-09T14:14:45.970-0400 I INDEX [conn183] build index on: db52.reindex_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.970-0400 m31100| 2015-07-09T14:14:45.970-0400 I INDEX [conn183] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.973-0400 m31102| 2015-07-09T14:14:45.972-0400 I INDEX [repl writer worker 8] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.974-0400 m31102| 2015-07-09T14:14:45.972-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.974-0400 m31101| 2015-07-09T14:14:45.973-0400 I INDEX [repl writer worker 9] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.974-0400 m31101| 2015-07-09T14:14:45.973-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.980-0400 m31100| 2015-07-09T14:14:45.980-0400 I INDEX [conn183] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.982-0400 m31100| 2015-07-09T14:14:45.980-0400 I COMMAND [conn183] command db52.$cmd command: createIndexes { createIndexes: "reindex_2", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1069342 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 586ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.989-0400 m31100| 2015-07-09T14:14:45.989-0400 I INDEX [conn185] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.990-0400 m31100| 2015-07-09T14:14:45.989-0400 I INDEX [conn185] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:45.996-0400 m31101| 2015-07-09T14:14:45.995-0400 I INDEX [repl writer worker 9] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.002-0400 m31102| 2015-07-09T14:14:46.001-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.004-0400 m31100| 2015-07-09T14:14:46.003-0400 I INDEX [conn185] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.006-0400 m31100| 2015-07-09T14:14:46.005-0400 I COMMAND [conn185] command db52.$cmd command: createIndexes { createIndexes: "reindex_3", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1065336 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 586ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.006-0400 m31101| 2015-07-09T14:14:46.005-0400 I INDEX [repl writer worker 8] build index on: db52.reindex_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.007-0400 m31101| 2015-07-09T14:14:46.005-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.010-0400 m31102| 2015-07-09T14:14:46.009-0400 I INDEX [repl writer worker 12] build index on: db52.reindex_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.010-0400 m31102| 2015-07-09T14:14:46.009-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.016-0400 m31101| 2015-07-09T14:14:46.016-0400 I INDEX [repl writer worker 8] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.018-0400 m31100| 2015-07-09T14:14:46.018-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.019-0400 m31100| 2015-07-09T14:14:46.018-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.021-0400 m31102| 2015-07-09T14:14:46.021-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.025-0400 m31100| 2015-07-09T14:14:46.024-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.025-0400 m31100| 2015-07-09T14:14:46.024-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.028-0400 m31102| 2015-07-09T14:14:46.028-0400 I INDEX [repl writer worker 1] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.029-0400 m31102| 2015-07-09T14:14:46.028-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.035-0400 m31101| 2015-07-09T14:14:46.033-0400 I INDEX [repl writer worker 14] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.035-0400 m31101| 2015-07-09T14:14:46.033-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.035-0400 m31100| 2015-07-09T14:14:46.035-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.036-0400 m31100| 2015-07-09T14:14:46.035-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.040-0400 m31100| 2015-07-09T14:14:46.039-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.040-0400 m31100| 2015-07-09T14:14:46.039-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.044-0400 m31102| 2015-07-09T14:14:46.044-0400 I INDEX [repl writer worker 1] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.046-0400 m31101| 2015-07-09T14:14:46.046-0400 I INDEX [repl writer worker 14] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.148-0400 m31100| 2015-07-09T14:14:46.147-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.149-0400 m31100| 2015-07-09T14:14:46.148-0400 I COMMAND [conn40] command db52.reindex_4 command: reIndex { reIndex: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 428883 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 572ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.150-0400 m31100| 2015-07-09T14:14:46.150-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.155-0400 m31100| 2015-07-09T14:14:46.154-0400 I INDEX [conn177] build index on: db52.reindex_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.155-0400 m31100| 2015-07-09T14:14:46.154-0400 I INDEX [conn177] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.166-0400 m31100| 2015-07-09T14:14:46.165-0400 I INDEX [conn177] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.167-0400 m31100| 2015-07-09T14:14:46.166-0400 I COMMAND [conn177] command db52.$cmd command: createIndexes { createIndexes: "reindex_1", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1055343 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 571ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.168-0400 m31100| 2015-07-09T14:14:46.168-0400 I COMMAND [conn132] CMD: reIndex db52.reindex_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.170-0400 m31100| 2015-07-09T14:14:46.169-0400 I INDEX [conn181] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.170-0400 m31100| 2015-07-09T14:14:46.169-0400 I INDEX [conn181] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.175-0400 m31102| 2015-07-09T14:14:46.175-0400 I INDEX [repl writer worker 7] build index on: db52.reindex_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.176-0400 m31102| 2015-07-09T14:14:46.175-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.177-0400 m31101| 2015-07-09T14:14:46.177-0400 I INDEX [repl writer worker 2] build index on: db52.reindex_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.178-0400 m31101| 2015-07-09T14:14:46.177-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.182-0400 m31100| 2015-07-09T14:14:46.182-0400 I INDEX [conn181] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.184-0400 m31100| 2015-07-09T14:14:46.183-0400 I COMMAND [conn181] command db52.$cmd command: createIndexes { createIndexes: "reindex_9", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1041002 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 557ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.184-0400 m31102| 2015-07-09T14:14:46.184-0400 I INDEX [repl writer worker 7] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.185-0400 m31100| 2015-07-09T14:14:46.185-0400 I COMMAND [conn35] CMD: reIndex db52.reindex_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.188-0400 m31100| 2015-07-09T14:14:46.188-0400 I INDEX [conn49] build index on: db52.reindex_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.188-0400 m31100| 2015-07-09T14:14:46.188-0400 I INDEX [conn49] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.192-0400 m31102| 2015-07-09T14:14:46.191-0400 I INDEX [repl writer worker 10] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.192-0400 m31102| 2015-07-09T14:14:46.192-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.194-0400 m31101| 2015-07-09T14:14:46.194-0400 I INDEX [repl writer worker 2] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.200-0400 m31101| 2015-07-09T14:14:46.199-0400 I INDEX [repl writer worker 10] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.200-0400 m31101| 2015-07-09T14:14:46.199-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.205-0400 m31100| 2015-07-09T14:14:46.205-0400 I INDEX [conn49] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.209-0400 m31100| 2015-07-09T14:14:46.206-0400 I COMMAND [conn49] command db52.$cmd command: createIndexes { createIndexes: "reindex_5", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1032413 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 553ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.210-0400 m31100| 2015-07-09T14:14:46.208-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.210-0400 m31102| 2015-07-09T14:14:46.209-0400 I INDEX [repl writer worker 10] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.216-0400 m31100| 2015-07-09T14:14:46.215-0400 I INDEX [conn180] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.216-0400 m31100| 2015-07-09T14:14:46.215-0400 I INDEX [conn180] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.217-0400 m31101| 2015-07-09T14:14:46.215-0400 I INDEX [repl writer worker 10] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.223-0400 m31102| 2015-07-09T14:14:46.223-0400 I INDEX [repl writer worker 15] build index on: db52.reindex_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.224-0400 m31102| 2015-07-09T14:14:46.223-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.227-0400 m31101| 2015-07-09T14:14:46.227-0400 I INDEX [repl writer worker 11] build index on: db52.reindex_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.227-0400 m31101| 2015-07-09T14:14:46.227-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.232-0400 m31100| 2015-07-09T14:14:46.231-0400 I INDEX [conn180] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.236-0400 m31100| 2015-07-09T14:14:46.234-0400 I COMMAND [conn180] command db52.$cmd command: createIndexes { createIndexes: "reindex_6", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1031691 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 558ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.237-0400 m31100| 2015-07-09T14:14:46.237-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.243-0400 m31102| 2015-07-09T14:14:46.242-0400 I INDEX [repl writer worker 15] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.243-0400 m31101| 2015-07-09T14:14:46.242-0400 I INDEX [repl writer worker 11] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.245-0400 m31100| 2015-07-09T14:14:46.244-0400 I INDEX [conn179] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.246-0400 m31100| 2015-07-09T14:14:46.244-0400 I INDEX [conn179] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.249-0400 m31101| 2015-07-09T14:14:46.249-0400 I INDEX [repl writer worker 6] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.250-0400 m31101| 2015-07-09T14:14:46.249-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.259-0400 m31102| 2015-07-09T14:14:46.257-0400 I INDEX [repl writer worker 9] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.259-0400 m31102| 2015-07-09T14:14:46.257-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.260-0400 m31100| 2015-07-09T14:14:46.257-0400 I INDEX [conn179] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.261-0400 m31100| 2015-07-09T14:14:46.258-0400 I COMMAND [conn179] command db52.$cmd command: createIndexes { createIndexes: "reindex_10", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1032191 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 554ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.261-0400 m31100| 2015-07-09T14:14:46.261-0400 I COMMAND [conn34] CMD: reIndex db52.reindex_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.264-0400 m31101| 2015-07-09T14:14:46.263-0400 I INDEX [repl writer worker 6] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.266-0400 m31100| 2015-07-09T14:14:46.265-0400 I INDEX [conn178] build index on: db52.reindex_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.266-0400 m31100| 2015-07-09T14:14:46.266-0400 I INDEX [conn178] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.271-0400 m31101| 2015-07-09T14:14:46.270-0400 I INDEX [repl writer worker 1] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.271-0400 m31101| 2015-07-09T14:14:46.270-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.271-0400 m31102| 2015-07-09T14:14:46.270-0400 I INDEX [repl writer worker 9] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.277-0400 m31102| 2015-07-09T14:14:46.276-0400 I INDEX [repl writer worker 6] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.277-0400 m31102| 2015-07-09T14:14:46.276-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.289-0400 m31100| 2015-07-09T14:14:46.287-0400 I INDEX [conn178] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.290-0400 m31100| 2015-07-09T14:14:46.288-0400 I COMMAND [conn178] command db52.$cmd command: createIndexes { createIndexes: "reindex_7", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1029933 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 559ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.290-0400 m31101| 2015-07-09T14:14:46.288-0400 I INDEX [repl writer worker 1] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.291-0400 m31100| 2015-07-09T14:14:46.291-0400 I QUERY [conn138] getmore db52.reindex_13 query: { $text: { $search: "ipsum" } } cursorid:3393855222480 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 4191681 } }, Collection: { acquireCount: { r: 8 } } } 2246ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.295-0400 m31102| 2015-07-09T14:14:46.294-0400 I INDEX [repl writer worker 6] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.302-0400 m31101| 2015-07-09T14:14:46.301-0400 I INDEX [repl writer worker 5] build index on: db52.reindex_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.302-0400 m31101| 2015-07-09T14:14:46.301-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.302-0400 m31100| 2015-07-09T14:14:46.301-0400 I INDEX [conn50] build index on: db52.reindex_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.303-0400 m31100| 2015-07-09T14:14:46.301-0400 I INDEX [conn50] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.303-0400 m31102| 2015-07-09T14:14:46.302-0400 I INDEX [repl writer worker 13] build index on: db52.reindex_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.303-0400 m31102| 2015-07-09T14:14:46.302-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.310-0400 m31101| 2015-07-09T14:14:46.310-0400 I INDEX [repl writer worker 5] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.315-0400 m31100| 2015-07-09T14:14:46.314-0400 I INDEX [conn50] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.316-0400 m31100| 2015-07-09T14:14:46.315-0400 I COMMAND [conn50] command db52.$cmd command: createIndexes { createIndexes: "reindex_11", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1029565 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 552ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.321-0400 m31102| 2015-07-09T14:14:46.320-0400 I INDEX [repl writer worker 13] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.329-0400 m31100| 2015-07-09T14:14:46.329-0400 I INDEX [conn175] build index on: db52.reindex_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.329-0400 m31100| 2015-07-09T14:14:46.329-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.333-0400 m31101| 2015-07-09T14:14:46.331-0400 I INDEX [repl writer worker 12] build index on: db52.reindex_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.333-0400 m31101| 2015-07-09T14:14:46.331-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.333-0400 m31102| 2015-07-09T14:14:46.332-0400 I INDEX [repl writer worker 4] build index on: db52.reindex_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.333-0400 m31102| 2015-07-09T14:14:46.332-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.340-0400 m31101| 2015-07-09T14:14:46.339-0400 I INDEX [repl writer worker 12] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.341-0400 m31100| 2015-07-09T14:14:46.339-0400 I INDEX [conn175] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.342-0400 m31100| 2015-07-09T14:14:46.341-0400 I COMMAND [conn175] command db52.$cmd command: createIndexes { createIndexes: "reindex_8", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1018969 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 543ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.342-0400 m31100| 2015-07-09T14:14:46.342-0400 I COMMAND [conn37] CMD: reIndex db52.reindex_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.346-0400 m31102| 2015-07-09T14:14:46.346-0400 I INDEX [repl writer worker 4] build index done. 
scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.352-0400 m31101| 2015-07-09T14:14:46.351-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.352-0400 m31101| 2015-07-09T14:14:46.351-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.355-0400 m31100| 2015-07-09T14:14:46.354-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.355-0400 m31100| 2015-07-09T14:14:46.354-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.356-0400 m31102| 2015-07-09T14:14:46.355-0400 I INDEX [repl writer worker 14] build index on: db52.reindex_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.356-0400 m31102| 2015-07-09T14:14:46.355-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.359-0400 m31100| 2015-07-09T14:14:46.359-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.359-0400 m31100| 2015-07-09T14:14:46.359-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.364-0400 m31101| 2015-07-09T14:14:46.363-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.366-0400 m31100| 2015-07-09T14:14:46.366-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.366-0400 m31100| 2015-07-09T14:14:46.366-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.369-0400 m31102| 2015-07-09T14:14:46.368-0400 I INDEX [repl writer worker 14] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.373-0400 m31100| 2015-07-09T14:14:46.372-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.373-0400 m31100| 2015-07-09T14:14:46.372-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.494-0400 m31100| 2015-07-09T14:14:46.494-0400 I INDEX [conn38] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.495-0400 m31100| 2015-07-09T14:14:46.494-0400 I COMMAND [conn38] command db52.reindex_12 command: reIndex { reIndex: "reindex_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 402826 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 556ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.497-0400 m31100| 2015-07-09T14:14:46.496-0400 I COMMAND [conn38] CMD: reIndex db52.reindex_12 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.502-0400 m31100| 2015-07-09T14:14:46.502-0400 I INDEX [conn45] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.502-0400 m31100| 2015-07-09T14:14:46.502-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.514-0400 m31100| 2015-07-09T14:14:46.514-0400 I INDEX [conn45] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.515-0400 m31100| 2015-07-09T14:14:46.515-0400 I COMMAND [conn45] command db52.$cmd command: createIndexes { createIndexes: "reindex_14", indexes: [ { key: { integer: 1.0 }, name: "integer_1" } ] } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:173 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1031380 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_query 550ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.518-0400 m31100| 2015-07-09T14:14:46.517-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63761 #187 (111 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.521-0400 m31100| 2015-07-09T14:14:46.520-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.522-0400 m31100| 2015-07-09T14:14:46.520-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.522-0400 m31100| 2015-07-09T14:14:46.520-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.528-0400 m31101| 2015-07-09T14:14:46.527-0400 I INDEX [repl writer worker 7] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.528-0400 m31102| 2015-07-09T14:14:46.527-0400 I INDEX [repl writer worker 3] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.529-0400 m31101| 2015-07-09T14:14:46.527-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.529-0400 m31100| 2015-07-09T14:14:46.527-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", 
language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.530-0400 m31102| 2015-07-09T14:14:46.527-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.530-0400 m31100| 2015-07-09T14:14:46.527-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.532-0400 m31100| 2015-07-09T14:14:46.531-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.532-0400 m31100| 2015-07-09T14:14:46.531-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.540-0400 m31100| 2015-07-09T14:14:46.539-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.541-0400 m31100| 2015-07-09T14:14:46.539-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.543-0400 m31102| 2015-07-09T14:14:46.543-0400 I INDEX [repl writer worker 3] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.547-0400 m31101| 2015-07-09T14:14:46.547-0400 I INDEX [repl writer worker 7] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.649-0400 m31100| 2015-07-09T14:14:46.649-0400 I INDEX [conn40] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.650-0400 m31100| 2015-07-09T14:14:46.649-0400 I COMMAND [conn40] command db52.reindex_4 command: reIndex { reIndex: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 365055 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 499ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.651-0400 m31100| 2015-07-09T14:14:46.651-0400 I COMMAND [conn40] CMD: reIndex db52.reindex_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.657-0400 m31100| 2015-07-09T14:14:46.657-0400 I INDEX [conn132] build index on: db52.reindex_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.658-0400 m31100| 2015-07-09T14:14:46.657-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.665-0400 m31100| 2015-07-09T14:14:46.664-0400 I INDEX [conn132] build index on: db52.reindex_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_1", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.666-0400 m31100| 2015-07-09T14:14:46.664-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.672-0400 m31100| 2015-07-09T14:14:46.671-0400 I INDEX [conn132] build index on: db52.reindex_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_1", 2dsphereIndexVersion: 2 } 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.672-0400 m31100| 2015-07-09T14:14:46.671-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.677-0400 m31100| 2015-07-09T14:14:46.677-0400 I INDEX [conn132] build index on: db52.reindex_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.677-0400 m31100| 2015-07-09T14:14:46.677-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.799-0400 m31100| 2015-07-09T14:14:46.798-0400 I INDEX [conn132] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.801-0400 m31100| 2015-07-09T14:14:46.801-0400 I COMMAND [conn132] command db52.reindex_1 command: reIndex { reIndex: "reindex_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 481741 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 633ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.807-0400 m31100| 2015-07-09T14:14:46.807-0400 I INDEX [conn35] build index on: db52.reindex_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.808-0400 m31100| 2015-07-09T14:14:46.807-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.814-0400 m31100| 2015-07-09T14:14:46.813-0400 I INDEX [conn35] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.814-0400 m31100| 2015-07-09T14:14:46.813-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.822-0400 m31100| 2015-07-09T14:14:46.821-0400 I INDEX [conn35] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.822-0400 m31100| 2015-07-09T14:14:46.822-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.828-0400 m31100| 2015-07-09T14:14:46.828-0400 I INDEX [conn35] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.828-0400 m31100| 2015-07-09T14:14:46.828-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.935-0400 m31100| 2015-07-09T14:14:46.935-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.937-0400 m31100| 2015-07-09T14:14:46.936-0400 I COMMAND [conn35] command db52.reindex_9 command: reIndex { reIndex: "reindex_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1116181 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 750ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.948-0400 m31100| 2015-07-09T14:14:46.947-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.948-0400 m31100| 2015-07-09T14:14:46.947-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.957-0400 m31100| 2015-07-09T14:14:46.955-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_5", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.957-0400 m31100| 2015-07-09T14:14:46.955-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.963-0400 m31100| 2015-07-09T14:14:46.962-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_5", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.963-0400 m31100| 2015-07-09T14:14:46.962-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.972-0400 m31100| 2015-07-09T14:14:46.972-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:46.973-0400 m31100| 2015-07-09T14:14:46.972-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.123-0400 m31100| 2015-07-09T14:14:47.123-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.125-0400 m31100| 2015-07-09T14:14:47.124-0400 I COMMAND [conn32] command db52.reindex_5 command: reIndex { reIndex: "reindex_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1228446 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 915ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.125-0400 m31100| 2015-07-09T14:14:47.125-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.130-0400 m31100| 2015-07-09T14:14:47.129-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.130-0400 m31100| 2015-07-09T14:14:47.129-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.137-0400 m31100| 2015-07-09T14:14:47.137-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.137-0400 m31100| 2015-07-09T14:14:47.137-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.144-0400 m31100| 2015-07-09T14:14:47.143-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.144-0400 m31100| 2015-07-09T14:14:47.143-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.148-0400 m31100| 2015-07-09T14:14:47.148-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.149-0400 m31100| 2015-07-09T14:14:47.148-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.253-0400 m31100| 2015-07-09T14:14:47.253-0400 I INDEX [conn15] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.254-0400 m31100| 2015-07-09T14:14:47.254-0400 I COMMAND [conn15] command db52.reindex_6 command: reIndex { reIndex: "reindex_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1388248 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1016ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.255-0400 m31100| 2015-07-09T14:14:47.255-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.261-0400 m31100| 2015-07-09T14:14:47.260-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.261-0400 m31100| 2015-07-09T14:14:47.260-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.266-0400 m31100| 2015-07-09T14:14:47.265-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.267-0400 m31100| 2015-07-09T14:14:47.265-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.273-0400 m31100| 2015-07-09T14:14:47.273-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.273-0400 m31100| 2015-07-09T14:14:47.273-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.278-0400 m31100| 2015-07-09T14:14:47.277-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.278-0400 m31100| 2015-07-09T14:14:47.278-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.413-0400 m31100| 2015-07-09T14:14:47.413-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.414-0400 m31100| 2015-07-09T14:14:47.414-0400 I COMMAND [conn34] command db52.reindex_10 command: reIndex { reIndex: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1492940 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.416-0400 m31100| 2015-07-09T14:14:47.415-0400 I COMMAND [conn49] command db52.reindex_13 command: listIndexes { listIndexes: "reindex_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 2618024 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 1115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.417-0400 m31100| 2015-07-09T14:14:47.416-0400 I COMMAND [conn34] CMD: reIndex db52.reindex_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.417-0400 m31100| 2015-07-09T14:14:47.417-0400 I COMMAND [conn35] CMD: reIndex db52.reindex_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.427-0400 m31100| 2015-07-09T14:14:47.426-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.427-0400 m31100| 2015-07-09T14:14:47.426-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.431-0400 m31100| 2015-07-09T14:14:47.431-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_8", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.432-0400 m31100| 2015-07-09T14:14:47.431-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.435-0400 m31100| 2015-07-09T14:14:47.435-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_8", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.436-0400 m31100| 2015-07-09T14:14:47.435-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.441-0400 m31100| 2015-07-09T14:14:47.441-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.442-0400 m31100| 2015-07-09T14:14:47.441-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.558-0400 m31100| 2015-07-09T14:14:47.557-0400 I INDEX [conn37] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.559-0400 m31100| 2015-07-09T14:14:47.558-0400 I COMMAND [conn37] command db52.reindex_8 command: reIndex { reIndex: "reindex_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2578370 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1215ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.559-0400 m31100| 2015-07-09T14:14:47.559-0400 I COMMAND [conn37] CMD: reIndex db52.reindex_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.565-0400 m31100| 2015-07-09T14:14:47.564-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.565-0400 m31100| 2015-07-09T14:14:47.564-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.572-0400 m31100| 2015-07-09T14:14:47.571-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_12", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.572-0400 m31100| 2015-07-09T14:14:47.572-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.577-0400 m31100| 2015-07-09T14:14:47.577-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_12", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.578-0400 m31100| 2015-07-09T14:14:47.577-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.581-0400 m31100| 2015-07-09T14:14:47.581-0400 I INDEX [conn38] build index on: db52.reindex_12 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_12" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.581-0400 m31100| 2015-07-09T14:14:47.581-0400 I INDEX [conn38] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.692-0400 m31100| 2015-07-09T14:14:47.692-0400 I INDEX [conn38] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.693-0400 m31100| 2015-07-09T14:14:47.693-0400 I COMMAND [conn38] command db52.reindex_12 command: reIndex { reIndex: "reindex_12" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2565397 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1196ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.700-0400 m31100| 2015-07-09T14:14:47.699-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.700-0400 m31100| 2015-07-09T14:14:47.699-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.704-0400 m31100| 2015-07-09T14:14:47.704-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.705-0400 m31100| 2015-07-09T14:14:47.704-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.712-0400 m31100| 2015-07-09T14:14:47.711-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.712-0400 m31100| 2015-07-09T14:14:47.711-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.717-0400 m31100| 2015-07-09T14:14:47.717-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.717-0400 m31100| 2015-07-09T14:14:47.717-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.835-0400 m31100| 2015-07-09T14:14:47.835-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.836-0400 m31100| 2015-07-09T14:14:47.836-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2674421 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1315ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.837-0400 m31100| 2015-07-09T14:14:47.837-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.845-0400 m31100| 2015-07-09T14:14:47.844-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.845-0400 m31100| 2015-07-09T14:14:47.844-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.851-0400 m31100| 2015-07-09T14:14:47.850-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.851-0400 m31100| 2015-07-09T14:14:47.851-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.856-0400 m31100| 2015-07-09T14:14:47.856-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.857-0400 m31100| 2015-07-09T14:14:47.856-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.862-0400 m31100| 2015-07-09T14:14:47.862-0400 I INDEX [conn40] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:47.863-0400 m31100| 2015-07-09T14:14:47.862-0400 I INDEX [conn40] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.007-0400 m31100| 2015-07-09T14:14:48.006-0400 I INDEX [conn40] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.008-0400 m31100| 2015-07-09T14:14:48.007-0400 I COMMAND [conn40] command db52.reindex_4 command: reIndex { reIndex: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2685937 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1356ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.015-0400 m31100| 2015-07-09T14:14:48.015-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.016-0400 m31100| 2015-07-09T14:14:48.015-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.020-0400 m31100| 2015-07-09T14:14:48.020-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_5", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.020-0400 m31100| 2015-07-09T14:14:48.020-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.027-0400 m31100| 2015-07-09T14:14:48.026-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_5", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.027-0400 m31100| 2015-07-09T14:14:48.026-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.032-0400 m31100| 2015-07-09T14:14:48.031-0400 I INDEX [conn32] build index on: db52.reindex_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.032-0400 m31100| 2015-07-09T14:14:48.032-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.173-0400 m31100| 2015-07-09T14:14:48.173-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.174-0400 m31100| 2015-07-09T14:14:48.173-0400 I COMMAND [conn32] command db52.reindex_5 command: reIndex { reIndex: "reindex_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1382964 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1048ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.180-0400 m31100| 2015-07-09T14:14:48.180-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.181-0400 m31100| 2015-07-09T14:14:48.180-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.187-0400 m31100| 2015-07-09T14:14:48.186-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.187-0400 m31100| 2015-07-09T14:14:48.186-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.191-0400 m31100| 2015-07-09T14:14:48.190-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.191-0400 m31100| 2015-07-09T14:14:48.190-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.198-0400 m31100| 2015-07-09T14:14:48.197-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.198-0400 m31100| 2015-07-09T14:14:48.198-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.321-0400 m31100| 2015-07-09T14:14:48.321-0400 I INDEX [conn15] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.325-0400 m31100| 2015-07-09T14:14:48.325-0400 I COMMAND [conn15] command db52.reindex_6 command: reIndex { reIndex: "reindex_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1419356 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1069ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.327-0400 m31100| 2015-07-09T14:14:48.326-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.329-0400 m31100| 2015-07-09T14:14:48.329-0400 I QUERY [conn56] query db52.reindex_0 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3416179584894 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 9 }, timeAcquiringMicros: { r: 8241926 } }, Collection: { acquireCount: { r: 9 } } } 4246ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.335-0400 m31100| 2015-07-09T14:14:48.335-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.335-0400 m31100| 2015-07-09T14:14:48.335-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.341-0400 m31100| 2015-07-09T14:14:48.341-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.341-0400 m31100| 2015-07-09T14:14:48.341-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.346-0400 m31100| 2015-07-09T14:14:48.346-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.346-0400 m31100| 2015-07-09T14:14:48.346-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.350-0400 m31100| 2015-07-09T14:14:48.350-0400 I INDEX [conn34] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.350-0400 m31100| 2015-07-09T14:14:48.350-0400 I INDEX [conn34] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.456-0400 m31100| 2015-07-09T14:14:48.455-0400 I INDEX [conn34] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.457-0400 m31100| 2015-07-09T14:14:48.456-0400 I COMMAND [conn34] command db52.reindex_10 command: reIndex { reIndex: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1413378 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1039ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.469-0400 m31100| 2015-07-09T14:14:48.468-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.469-0400 m31100| 2015-07-09T14:14:48.468-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.475-0400 m31100| 2015-07-09T14:14:48.475-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_13", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.476-0400 m31100| 2015-07-09T14:14:48.475-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.482-0400 m31100| 2015-07-09T14:14:48.481-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_13", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.482-0400 m31100| 2015-07-09T14:14:48.481-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.487-0400 m31100| 2015-07-09T14:14:48.486-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.487-0400 m31100| 2015-07-09T14:14:48.486-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.640-0400 m31100| 2015-07-09T14:14:48.639-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.641-0400 m31100| 2015-07-09T14:14:48.640-0400 I COMMAND [conn35] command db52.reindex_13 command: reIndex { reIndex: "reindex_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2541971 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1223ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.642-0400 m31100| 2015-07-09T14:14:48.642-0400 I COMMAND [conn35] CMD: reIndex db52.reindex_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.647-0400 m31100| 2015-07-09T14:14:48.646-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.647-0400 m31100| 2015-07-09T14:14:48.647-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.651-0400 m31100| 2015-07-09T14:14:48.651-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_8", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.651-0400 m31100| 2015-07-09T14:14:48.651-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.655-0400 m31100| 2015-07-09T14:14:48.654-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_8", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.655-0400 m31100| 2015-07-09T14:14:48.655-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.662-0400 m31100| 2015-07-09T14:14:48.661-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.662-0400 m31100| 2015-07-09T14:14:48.661-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.802-0400 m31100| 2015-07-09T14:14:48.802-0400 I INDEX [conn37] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.803-0400 m31100| 2015-07-09T14:14:48.802-0400 I COMMAND [conn37] command db52.reindex_8 command: reIndex { reIndex: "reindex_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2583625 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1242ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.804-0400 m31100| 2015-07-09T14:14:48.804-0400 I COMMAND [conn37] CMD: reIndex db52.reindex_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.811-0400 m31100| 2015-07-09T14:14:48.811-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.812-0400 m31100| 2015-07-09T14:14:48.811-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.817-0400 m31100| 2015-07-09T14:14:48.815-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.817-0400 m31100| 2015-07-09T14:14:48.816-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.823-0400 m31100| 2015-07-09T14:14:48.823-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.823-0400 m31100| 2015-07-09T14:14:48.823-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.828-0400 m31100| 2015-07-09T14:14:48.827-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.828-0400 m31100| 2015-07-09T14:14:48.827-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.937-0400 m31100| 2015-07-09T14:14:48.937-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.944-0400 m31100| 2015-07-09T14:14:48.938-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1465994 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 1100ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.944-0400 m31100| 2015-07-09T14:14:48.943-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.954-0400 m31100| 2015-07-09T14:14:48.954-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.955-0400 m31100| 2015-07-09T14:14:48.954-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.958-0400 m31100| 2015-07-09T14:14:48.957-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.958-0400 m31100| 2015-07-09T14:14:48.957-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.963-0400 m31100| 2015-07-09T14:14:48.962-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.963-0400 m31100| 2015-07-09T14:14:48.962-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.967-0400 m31100| 2015-07-09T14:14:48.967-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:48.967-0400 m31100| 2015-07-09T14:14:48.967-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.081-0400 m31100| 2015-07-09T14:14:49.080-0400 I INDEX [conn15] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.082-0400 m31100| 2015-07-09T14:14:49.081-0400 I COMMAND [conn15] command db52.reindex_6 command: reIndex { reIndex: "reindex_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1121247 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 754ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.082-0400 m31100| 2015-07-09T14:14:49.082-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.088-0400 m31100| 2015-07-09T14:14:49.087-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.088-0400 m31100| 2015-07-09T14:14:49.087-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.094-0400 m31100| 2015-07-09T14:14:49.093-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_13", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.094-0400 m31100| 2015-07-09T14:14:49.093-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.097-0400 m31100| 2015-07-09T14:14:49.097-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_13", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.098-0400 m31100| 2015-07-09T14:14:49.097-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.104-0400 m31100| 2015-07-09T14:14:49.104-0400 I INDEX [conn35] build index on: db52.reindex_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.105-0400 m31100| 2015-07-09T14:14:49.104-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.221-0400 m31100| 2015-07-09T14:14:49.221-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.222-0400 m31100| 2015-07-09T14:14:49.221-0400 I COMMAND [conn35] command db52.reindex_13 command: reIndex { reIndex: "reindex_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 438672 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 579ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.227-0400 m31100| 2015-07-09T14:14:49.227-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.228-0400 m31100| 2015-07-09T14:14:49.227-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.233-0400 m31100| 2015-07-09T14:14:49.232-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_8", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.234-0400 m31100| 2015-07-09T14:14:49.232-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.240-0400 m31100| 2015-07-09T14:14:49.240-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_8", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.240-0400 m31100| 2015-07-09T14:14:49.240-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.245-0400 m31100| 2015-07-09T14:14:49.245-0400 I INDEX [conn37] build index on: db52.reindex_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.245-0400 m31100| 2015-07-09T14:14:49.245-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.356-0400 m31100| 2015-07-09T14:14:49.355-0400 I INDEX [conn37] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.363-0400 m31100| 2015-07-09T14:14:49.356-0400 I COMMAND [conn37] command db52.reindex_8 command: reIndex { reIndex: "reindex_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 417482 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 552ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.371-0400 m31100| 2015-07-09T14:14:49.370-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.371-0400 m31100| 2015-07-09T14:14:49.370-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.376-0400 m31100| 2015-07-09T14:14:49.375-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.376-0400 m31100| 2015-07-09T14:14:49.375-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.382-0400 m31100| 2015-07-09T14:14:49.381-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.383-0400 m31100| 2015-07-09T14:14:49.381-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.387-0400 m31100| 2015-07-09T14:14:49.386-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.387-0400 m31100| 2015-07-09T14:14:49.386-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.495-0400 m31100| 2015-07-09T14:14:49.495-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.497-0400 m31100| 2015-07-09T14:14:49.495-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 421205 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 552ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.497-0400 m31100| 2015-07-09T14:14:49.496-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.503-0400 m31100| 2015-07-09T14:14:49.502-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.503-0400 m31100| 2015-07-09T14:14:49.502-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.507-0400 m31100| 2015-07-09T14:14:49.507-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.508-0400 m31100| 2015-07-09T14:14:49.507-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.512-0400 m31100| 2015-07-09T14:14:49.511-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.513-0400 m31100| 2015-07-09T14:14:49.511-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.517-0400 m31100| 2015-07-09T14:14:49.517-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.517-0400 m31100| 2015-07-09T14:14:49.517-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.626-0400 m31100| 2015-07-09T14:14:49.625-0400 I INDEX [conn15] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.631-0400 m31100| 2015-07-09T14:14:49.630-0400 I COMMAND [conn15] command db52.reindex_6 command: reIndex { reIndex: "reindex_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 413378 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 548ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.631-0400 m31100| 2015-07-09T14:14:49.631-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.646-0400 m31100| 2015-07-09T14:14:49.645-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.646-0400 m31100| 2015-07-09T14:14:49.646-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.651-0400 m31100| 2015-07-09T14:14:49.651-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.652-0400 m31100| 2015-07-09T14:14:49.651-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.656-0400 m31100| 2015-07-09T14:14:49.656-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.657-0400 m31100| 2015-07-09T14:14:49.656-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.661-0400 m31100| 2015-07-09T14:14:49.661-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.662-0400 m31100| 2015-07-09T14:14:49.661-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.772-0400 m31100| 2015-07-09T14:14:49.772-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.774-0400 m31100| 2015-07-09T14:14:49.772-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 142165 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 275ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.775-0400 m31100| 2015-07-09T14:14:49.774-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.787-0400 m31100| 2015-07-09T14:14:49.786-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.787-0400 m31100| 2015-07-09T14:14:49.786-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.793-0400 m31100| 2015-07-09T14:14:49.791-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.793-0400 m31100| 2015-07-09T14:14:49.791-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.797-0400 m31100| 2015-07-09T14:14:49.796-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.797-0400 m31100| 2015-07-09T14:14:49.796-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.804-0400 m31100| 2015-07-09T14:14:49.804-0400 I INDEX [conn15] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.804-0400 m31100| 2015-07-09T14:14:49.804-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.914-0400 m31100| 2015-07-09T14:14:49.914-0400 I INDEX [conn15] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.919-0400 m31100| 2015-07-09T14:14:49.914-0400 I COMMAND [conn15] command db52.reindex_6 command: reIndex { reIndex: "reindex_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 150470 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 283ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.931-0400 m31100| 2015-07-09T14:14:49.931-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.931-0400 m31100| 2015-07-09T14:14:49.931-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.936-0400 m31100| 2015-07-09T14:14:49.935-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.936-0400 m31100| 2015-07-09T14:14:49.935-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.939-0400 m31100| 2015-07-09T14:14:49.939-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.940-0400 m31100| 2015-07-09T14:14:49.939-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.943-0400 m31100| 2015-07-09T14:14:49.943-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:49.943-0400 m31100| 2015-07-09T14:14:49.943-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.072-0400 m31100| 2015-07-09T14:14:50.071-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.073-0400 m31100| 2015-07-09T14:14:50.072-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 149993 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 297ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.075-0400 m31100| 2015-07-09T14:14:50.074-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.087-0400 m31100| 2015-07-09T14:14:50.087-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.087-0400 m31100| 2015-07-09T14:14:50.087-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.092-0400 m31100| 2015-07-09T14:14:50.091-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.092-0400 m31100| 2015-07-09T14:14:50.091-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.095-0400 m31100| 2015-07-09T14:14:50.095-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.096-0400 m31100| 2015-07-09T14:14:50.095-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.101-0400 m31100| 2015-07-09T14:14:50.100-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.101-0400 m31100| 2015-07-09T14:14:50.100-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.224-0400 m31100| 2015-07-09T14:14:50.223-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.231-0400 m31100| 2015-07-09T14:14:50.224-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 6303 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.232-0400 m31100| 2015-07-09T14:14:50.227-0400 I QUERY [conn150] getmore db52.reindex_2 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3428520653200 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 3776338 } }, Collection: { acquireCount: { r: 8 } } } 1905ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.233-0400 m31100| 2015-07-09T14:14:50.231-0400 I QUERY [conn138] getmore db52.reindex_3 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3410978603518 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 3776860 } }, Collection: { acquireCount: { r: 8 } } } 1908ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.235-0400 m31100| 2015-07-09T14:14:50.233-0400 I QUERY [conn148] getmore db52.reindex_1 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3401789402493 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 2375413 } }, Collection: { acquireCount: { r: 8 } } } 1293ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.236-0400 m31100| 2015-07-09T14:14:50.233-0400 I QUERY [conn142] getmore db52.reindex_12 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3423687624709 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 6 }, timeAcquiringMicros: { r: 1257673 } }, Collection: { acquireCount: { r: 8 } } } 873ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.241-0400 m31100| 2015-07-09T14:14:50.234-0400 I QUERY [conn140] getmore db52.reindex_7 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3432435102977 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { 
r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 2375496 } }, Collection: { acquireCount: { r: 8 } } } 1294ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.241-0400 m31100| 2015-07-09T14:14:50.236-0400 I QUERY [conn139] getmore db52.reindex_5 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3445721942583 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 6 }, timeAcquiringMicros: { r: 1256222 } }, Collection: { acquireCount: { r: 8 } } } 878ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.242-0400 m31100| 2015-07-09T14:14:50.240-0400 I QUERY [conn141] getmore db52.reindex_10 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3407706139020 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 842137 } }, Collection: { acquireCount: { r: 8 } } } 610ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.246-0400 m31100| 2015-07-09T14:14:50.243-0400 I QUERY [conn42] getmore db52.reindex_11 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3449748951637 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 2374894 } }, Collection: { acquireCount: { r: 8 } } } 1305ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.246-0400 m31100| 2015-07-09T14:14:50.244-0400 I QUERY [conn149] getmore db52.reindex_8 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3420662105742 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 436122 } }, Collection: { acquireCount: { r: 8 } } } 325ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.250-0400 m31100| 2015-07-09T14:14:50.244-0400 I QUERY [conn44] getmore db52.reindex_4 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3397739051626 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 6 }, timeAcquiringMicros: { r: 1257788 } }, Collection: { acquireCount: { r: 8 } } } 882ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.257-0400 m31100| 2015-07-09T14:14:50.251-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.257-0400 m31100| 
2015-07-09T14:14:50.251-0400 I QUERY [conn137] getmore db52.reindex_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3394111121607 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 578385 } }, Collection: { acquireCount: { r: 8 } } } 478ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.257-0400 m31100| 2015-07-09T14:14:50.251-0400 I QUERY [conn134] getmore db52.reindex_9 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3436433026364 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 2379672 } }, Collection: { acquireCount: { r: 8 } } } 1313ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.260-0400 m31100| 2015-07-09T14:14:50.251-0400 I QUERY [conn86] getmore db52.reindex_0 query: { $text: { $search: "ipsum" } } cursorid:3416179584894 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 2352092 } }, Collection: { acquireCount: { r: 8 } } } 1312ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.273-0400 m31100| 2015-07-09T14:14:50.273-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.273-0400 m31100| 2015-07-09T14:14:50.273-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.278-0400 m31100| 2015-07-09T14:14:50.278-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.278-0400 m31100| 2015-07-09T14:14:50.278-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.282-0400 m31100| 2015-07-09T14:14:50.282-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.283-0400 m31100| 2015-07-09T14:14:50.282-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.287-0400 m31100| 2015-07-09T14:14:50.287-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.287-0400 m31100| 2015-07-09T14:14:50.287-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.394-0400 m31100| 2015-07-09T14:14:50.394-0400 I INDEX [conn187] build index done. 
scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.408-0400 m31100| 2015-07-09T14:14:50.395-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 12787 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.408-0400 m31100| 2015-07-09T14:14:50.396-0400 I COMMAND [conn180] command db52.reindex_0 command: listIndexes { listIndexes: "reindex_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 121431 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.426-0400 m31100| 2015-07-09T14:14:50.417-0400 I QUERY [conn45] query db52.reindex_10 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3407317265527 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 145533 } }, Collection: { acquireCount: { r: 9 } } } 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.428-0400 m31100| 2015-07-09T14:14:50.420-0400 I QUERY [conn185] query db52.reindex_3 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3410587372707 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 139846 } }, Collection: { acquireCount: { r: 9 } } } 178ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.428-0400 m31100| 2015-07-09T14:14:50.420-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_14 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.439-0400 m31100| 2015-07-09T14:14:50.439-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.440-0400 m31100| 2015-07-09T14:14:50.439-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.448-0400 m31100| 2015-07-09T14:14:50.447-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_14", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.448-0400 m31100| 2015-07-09T14:14:50.447-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.453-0400 m31100| 2015-07-09T14:14:50.452-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_14", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.453-0400 m31100| 
2015-07-09T14:14:50.452-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.457-0400 m31100| 2015-07-09T14:14:50.457-0400 I INDEX [conn187] build index on: db52.reindex_14 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_14" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.458-0400 m31100| 2015-07-09T14:14:50.457-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.590-0400 m31100| 2015-07-09T14:14:50.589-0400 I INDEX [conn187] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.604-0400 m31100| 2015-07-09T14:14:50.603-0400 I COMMAND [conn187] command db52.reindex_14 command: reIndex { reIndex: "reindex_14" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 12569 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.613-0400 m31100| 2015-07-09T14:14:50.612-0400 I QUERY [conn176] query db52.reindex_13 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3393247837030 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 293161 } }, Collection: { acquireCount: { r: 10 } } } 216ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.623-0400 m30999| 2015-07-09T14:14:50.612-0400 I NETWORK [conn331] end connection 127.0.0.1:63751 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.624-0400 m31100| 2015-07-09T14:14:50.616-0400 I QUERY [conn49] query db52.reindex_9 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3436572370639 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 292141 } }, Collection: { acquireCount: { r: 10 } } } 221ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.624-0400 m31100| 2015-07-09T14:14:50.621-0400 I QUERY [conn179] query db52.reindex_8 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3420264842237 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 301325 } }, Collection: { acquireCount: { r: 9 } } } 216ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.660-0400 m31100| 2015-07-09T14:14:50.642-0400 I QUERY [conn50] query db52.reindex_5 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3446677309966 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 318055 } }, 
Collection: { acquireCount: { r: 9 } } } 227ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.736-0400 m31100| 2015-07-09T14:14:50.732-0400 I QUERY [conn44] getmore db52.reindex_12 query: { $text: { $search: "ipsum" } } cursorid:3424937711560 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 168076 } }, Collection: { acquireCount: { r: 8 } } } 311ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.742-0400 m31100| 2015-07-09T14:14:50.741-0400 I QUERY [conn42] getmore db52.reindex_11 query: { $text: { $search: "ipsum" } } cursorid:3449093479532 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 165275 } }, Collection: { acquireCount: { r: 8 } } } 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.746-0400 m31100| 2015-07-09T14:14:50.745-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.760-0400 m30999| 2015-07-09T14:14:50.757-0400 I NETWORK [conn332] end connection 127.0.0.1:63754 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.768-0400 m31100| 2015-07-09T14:14:50.767-0400 I QUERY [conn141] getmore db52.reindex_2 query: { $text: { $search: "ipsum" } } cursorid:3429450854768 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 168648 } }, Collection: { acquireCount: { r: 9 } } } 170ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.773-0400 m31100| 2015-07-09T14:14:50.773-0400 I QUERY [conn134] getmore db52.reindex_1 query: { $text: { $search: "ipsum" } } cursorid:3402461389995 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 167938 } }, Collection: { acquireCount: { r: 8 } } } 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.783-0400 m31100| 2015-07-09T14:14:50.782-0400 I INDEX [conn187] build index on: db52.reindex_8 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.783-0400 m31100| 2015-07-09T14:14:50.782-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.789-0400 m31100| 2015-07-09T14:14:50.788-0400 I INDEX [conn187] build index on: db52.reindex_8 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_8", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.789-0400 m31100| 2015-07-09T14:14:50.789-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.794-0400 m31100| 2015-07-09T14:14:50.794-0400 I INDEX [conn187] build index on: db52.reindex_8 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_8", 2dsphereIndexVersion: 2 } 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.794-0400 m31100| 2015-07-09T14:14:50.794-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.798-0400 m31100| 2015-07-09T14:14:50.798-0400 I INDEX [conn187] build index on: db52.reindex_8 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.799-0400 m31100| 2015-07-09T14:14:50.798-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.933-0400 m31100| 2015-07-09T14:14:50.932-0400 I INDEX [conn187] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.943-0400 m31100| 2015-07-09T14:14:50.943-0400 I COMMAND [conn187] command db52.reindex_8 command: reIndex { reIndex: "reindex_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 9238 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.947-0400 m31100| 2015-07-09T14:14:50.946-0400 I COMMAND [conn175] command db52.reindex_2 command: listIndexes { listIndexes: "reindex_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 169455 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.948-0400 m31100| 2015-07-09T14:14:50.947-0400 I COMMAND [conn49] command db52.reindex_1 command: listIndexes { listIndexes: "reindex_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 163630 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 168ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.953-0400 m31100| 2015-07-09T14:14:50.952-0400 I QUERY [conn50] query db52.reindex_11 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3449683039706 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 171709 } }, Collection: { acquireCount: { r: 9 } } } 186ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.960-0400 m31100| 2015-07-09T14:14:50.960-0400 I QUERY [conn150] getmore db52.reindex_6 query: { $text: { $search: "ipsum" } } cursorid:3390325743971 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 167201 } }, Collection: { acquireCount: { r: 9 } } } 294ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.977-0400 m31100| 2015-07-09T14:14:50.976-0400 I QUERY [conn139] getmore db52.reindex_3 query: { $text: { $search: "ipsum" } } cursorid:3410587372707 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 
locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 333765 } }, Collection: { acquireCount: { r: 8 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.985-0400 m31100| 2015-07-09T14:14:50.985-0400 I COMMAND [conn35] CMD: reIndex db52.reindex_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:50.990-0400 m31100| 2015-07-09T14:14:50.989-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.002-0400 m31100| 2015-07-09T14:14:51.002-0400 I QUERY [conn148] getmore db52.reindex_13 query: { $text: { $search: "ipsum" } } cursorid:3393247837030 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 166798 } }, Collection: { acquireCount: { r: 9 } } } 352ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.012-0400 m31100| 2015-07-09T14:14:51.012-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.013-0400 m31100| 2015-07-09T14:14:51.012-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.018-0400 m31100| 2015-07-09T14:14:51.018-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.019-0400 m31100| 2015-07-09T14:14:51.018-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.022-0400 m31100| 2015-07-09T14:14:51.022-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.022-0400 m31100| 2015-07-09T14:14:51.022-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.027-0400 m31100| 2015-07-09T14:14:51.026-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.027-0400 m31100| 2015-07-09T14:14:51.026-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.130-0400 m31100| 2015-07-09T14:14:51.130-0400 I INDEX [conn35] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.131-0400 m31100| 2015-07-09T14:14:51.131-0400 I COMMAND [conn35] command db52.reindex_3 command: reIndex { reIndex: "reindex_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 11280 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.132-0400 m31100| 2015-07-09T14:14:51.132-0400 I COMMAND [conn35] CMD: reIndex db52.reindex_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.136-0400 m31100| 2015-07-09T14:14:51.136-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.137-0400 m31100| 2015-07-09T14:14:51.136-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.142-0400 m31100| 2015-07-09T14:14:51.142-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.142-0400 m31100| 2015-07-09T14:14:51.142-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.147-0400 m31100| 2015-07-09T14:14:51.147-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.147-0400 m31100| 2015-07-09T14:14:51.147-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.152-0400 m31100| 2015-07-09T14:14:51.151-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.152-0400 m31100| 2015-07-09T14:14:51.152-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.264-0400 m31100| 2015-07-09T14:14:51.264-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.275-0400 m31100| 2015-07-09T14:14:51.270-0400 I COMMAND [conn187] command db52.reindex_6 command: reIndex { reIndex: "reindex_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 133038 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 281ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.275-0400 m31100| 2015-07-09T14:14:51.271-0400 I QUERY [conn149] getmore db52.reindex_4 query: { $text: { $search: "ipsum" } } cursorid:3397888848338 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 597683 } }, Collection: { acquireCount: { r: 9 } } } 679ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.275-0400 m31100| 2015-07-09T14:14:51.271-0400 I QUERY [conn138] getmore db52.reindex_5 query: { $text: { $search: "ipsum" } } cursorid:3446677309966 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 430567 } }, Collection: { acquireCount: { r: 10 } } } 585ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.276-0400 m31100| 2015-07-09T14:14:51.273-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.276-0400 m31100| 2015-07-09T14:14:51.273-0400 I COMMAND [conn49] command db52.reindex_13 command: listIndexes { listIndexes: "reindex_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 263015 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 263ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.276-0400 m31100| 2015-07-09T14:14:51.274-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_13 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.279-0400 m31100| 2015-07-09T14:14:51.278-0400 I QUERY [conn137] getmore db52.reindex_7 query: { $text: { $search: "ipsum" } } cursorid:3432045967419 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 604567 } }, Collection: { acquireCount: { r: 9 } } } 677ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.280-0400 m31100| 2015-07-09T14:14:51.279-0400 I QUERY [conn86] getmore db52.reindex_10 query: { $text: { $search: "ipsum" } } cursorid:3407317265527 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 607806 } }, Collection: { acquireCount: { r: 11 } } } 682ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.286-0400 m31100| 2015-07-09T14:14:51.285-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:51.287-0400 m31100| 2015-07-09T14:14:51.285-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.291-0400 m31100| 2015-07-09T14:14:51.291-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.292-0400 m31100| 2015-07-09T14:14:51.291-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.294-0400 m31100| 2015-07-09T14:14:51.294-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.295-0400 m31100| 2015-07-09T14:14:51.294-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.300-0400 m31100| 2015-07-09T14:14:51.300-0400 I INDEX [conn35] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.301-0400 m31100| 2015-07-09T14:14:51.300-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.407-0400 m31100| 2015-07-09T14:14:51.406-0400 I INDEX [conn35] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.410-0400 m31100| 2015-07-09T14:14:51.410-0400 I COMMAND [conn35] command db52.reindex_3 command: reIndex { reIndex: "reindex_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148121 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 277ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.411-0400 m31100| 2015-07-09T14:14:51.410-0400 I COMMAND [conn45] command db52.reindex_10 command: listIndexes { listIndexes: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 117099 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 117ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.413-0400 m31100| 2015-07-09T14:14:51.410-0400 I COMMAND [conn50] command db52.reindex_7 command: listIndexes { listIndexes: "reindex_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 123012 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.414-0400 m31100| 2015-07-09T14:14:51.412-0400 I COMMAND [conn179] command db52.reindex_4 command: listIndexes { listIndexes: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 127079 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 127ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.414-0400 m31100| 2015-07-09T14:14:51.412-0400 I COMMAND [conn49] command db52.reindex_5 command: listIndexes { listIndexes: "reindex_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 129626 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.416-0400 m31100| 2015-07-09T14:14:51.414-0400 I QUERY [conn140] getmore db52.reindex_9 query: { $text: { $search: "ipsum" } } cursorid:3436572370639 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 573979 } }, Collection: { acquireCount: { r: 10 } } } 768ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.416-0400 m31100| 2015-07-09T14:14:51.415-0400 I COMMAND [conn37] CMD: reIndex db52.reindex_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.416-0400 m31100| 2015-07-09T14:14:51.415-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.416-0400 m31100| 2015-07-09T14:14:51.416-0400 I COMMAND [conn132] CMD: reIndex db52.reindex_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.422-0400 m31100| 2015-07-09T14:14:51.421-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.423-0400 m31100| 2015-07-09T14:14:51.422-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.427-0400 m31100| 2015-07-09T14:14:51.426-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_6", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.428-0400 m31100| 2015-07-09T14:14:51.426-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.432-0400 m31100| 2015-07-09T14:14:51.431-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_6", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.432-0400 m31100| 2015-07-09T14:14:51.431-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.437-0400 m31100| 2015-07-09T14:14:51.436-0400 I INDEX [conn187] build index on: db52.reindex_6 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.437-0400 m31100| 2015-07-09T14:14:51.436-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.553-0400 m31100| 2015-07-09T14:14:51.553-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.554-0400 m31100| 2015-07-09T14:14:51.553-0400 I COMMAND [conn187] command db52.reindex_6 command: reIndex { reIndex: "reindex_6" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 143153 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 280ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.559-0400 m30999| 2015-07-09T14:14:51.559-0400 I NETWORK [conn327] end connection 127.0.0.1:63745 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.562-0400 m31100| 2015-07-09T14:14:51.561-0400 I INDEX [conn32] build index on: db52.reindex_13 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.562-0400 m31100| 2015-07-09T14:14:51.561-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.567-0400 m31100| 2015-07-09T14:14:51.566-0400 I INDEX [conn32] build index on: db52.reindex_13 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_13", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.567-0400 m31100| 2015-07-09T14:14:51.566-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.571-0400 m31100| 2015-07-09T14:14:51.571-0400 I INDEX [conn32] build index on: db52.reindex_13 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_13", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.571-0400 m31100| 2015-07-09T14:14:51.571-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.576-0400 m31100| 2015-07-09T14:14:51.575-0400 I INDEX [conn32] build index on: db52.reindex_13 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_13" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.576-0400 m31100| 2015-07-09T14:14:51.575-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.724-0400 m31100| 2015-07-09T14:14:51.723-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.726-0400 m31100| 2015-07-09T14:14:51.725-0400 I COMMAND [conn32] command db52.reindex_13 command: reIndex { reIndex: "reindex_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 279059 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 450ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.727-0400 m31100| 2015-07-09T14:14:51.726-0400 I COMMAND [conn176] command db52.reindex_9 command: listIndexes { listIndexes: "reindex_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 305920 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 306ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.728-0400 m31100| 2015-07-09T14:14:51.728-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.732-0400 m31100| 2015-07-09T14:14:51.731-0400 I QUERY [conn135] getmore db52.reindex_0 query: { $text: { $search: "ipsum" } } cursorid:3415991625416 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 877997 } }, Collection: { acquireCount: { r: 9 } } } 1016ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.741-0400 m31100| 2015-07-09T14:14:51.741-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.741-0400 m31100| 2015-07-09T14:14:51.741-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.747-0400 m31100| 2015-07-09T14:14:51.746-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.747-0400 m31100| 2015-07-09T14:14:51.746-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.753-0400 m31100| 2015-07-09T14:14:51.752-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.754-0400 m31100| 2015-07-09T14:14:51.752-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.757-0400 m31100| 2015-07-09T14:14:51.757-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.757-0400 m31100| 2015-07-09T14:14:51.757-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.863-0400 m31100| 2015-07-09T14:14:51.863-0400 I INDEX [conn37] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.864-0400 m31100| 2015-07-09T14:14:51.863-0400 I COMMAND [conn37] command db52.reindex_4 command: reIndex { reIndex: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 320103 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 448ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.865-0400 m31100| 2015-07-09T14:14:51.865-0400 I COMMAND [conn37] CMD: reIndex db52.reindex_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.872-0400 m31100| 2015-07-09T14:14:51.871-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.872-0400 m31100| 2015-07-09T14:14:51.871-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.877-0400 m31100| 2015-07-09T14:14:51.877-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.878-0400 m31100| 2015-07-09T14:14:51.877-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.882-0400 m31100| 2015-07-09T14:14:51.882-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.882-0400 m31100| 2015-07-09T14:14:51.882-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.888-0400 m31100| 2015-07-09T14:14:51.888-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:51.888-0400 m31100| 2015-07-09T14:14:51.888-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.004-0400 m31100| 2015-07-09T14:14:52.003-0400 I INDEX [conn15] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.004-0400 m31100| 2015-07-09T14:14:52.004-0400 I COMMAND [conn15] command db52.reindex_10 command: reIndex { reIndex: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 447838 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 588ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.005-0400 m31100| 2015-07-09T14:14:52.005-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.012-0400 m31100| 2015-07-09T14:14:52.011-0400 I INDEX [conn132] build index on: db52.reindex_5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.012-0400 m31100| 2015-07-09T14:14:52.011-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.017-0400 m31100| 2015-07-09T14:14:52.017-0400 I INDEX [conn132] build index on: db52.reindex_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_5", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.018-0400 m31100| 2015-07-09T14:14:52.017-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.023-0400 m31100| 2015-07-09T14:14:52.023-0400 I INDEX [conn132] build index on: db52.reindex_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_5", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.024-0400 m31100| 2015-07-09T14:14:52.023-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.028-0400 m31100| 2015-07-09T14:14:52.028-0400 I INDEX [conn132] build index on: db52.reindex_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.028-0400 m31100| 2015-07-09T14:14:52.028-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.148-0400 m31100| 2015-07-09T14:14:52.147-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.151-0400 m31100| 2015-07-09T14:14:52.150-0400 I COMMAND [conn132] command db52.reindex_5 command: reIndex { reIndex: "reindex_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1087902 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 733ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.152-0400 m31100| 2015-07-09T14:14:52.151-0400 I COMMAND [conn179] command db52.reindex_0 command: listIndexes { listIndexes: "reindex_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 413926 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 414ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.153-0400 m31100| 2015-07-09T14:14:52.152-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.160-0400 m31100| 2015-07-09T14:14:52.160-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.161-0400 m31100| 2015-07-09T14:14:52.160-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.166-0400 m31100| 2015-07-09T14:14:52.165-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.166-0400 m31100| 2015-07-09T14:14:52.165-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.171-0400 m31100| 2015-07-09T14:14:52.170-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.172-0400 m31100| 2015-07-09T14:14:52.170-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.177-0400 m31100| 2015-07-09T14:14:52.176-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.177-0400 m31100| 2015-07-09T14:14:52.176-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.298-0400 m31100| 2015-07-09T14:14:52.298-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.299-0400 m31100| 2015-07-09T14:14:52.298-0400 I COMMAND [conn32] command db52.reindex_9 command: reIndex { reIndex: "reindex_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 426430 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 569ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.300-0400 m31100| 2015-07-09T14:14:52.299-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.304-0400 m31100| 2015-07-09T14:14:52.304-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.305-0400 m31100| 2015-07-09T14:14:52.304-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.310-0400 m31100| 2015-07-09T14:14:52.309-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_4", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.310-0400 m31100| 2015-07-09T14:14:52.309-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.313-0400 m31100| 2015-07-09T14:14:52.313-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_4", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.314-0400 m31100| 2015-07-09T14:14:52.313-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.319-0400 m31100| 2015-07-09T14:14:52.318-0400 I INDEX [conn37] build index on: db52.reindex_4 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.319-0400 m31100| 2015-07-09T14:14:52.318-0400 I INDEX [conn37] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.422-0400 m31100| 2015-07-09T14:14:52.422-0400 I INDEX [conn37] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.423-0400 m31100| 2015-07-09T14:14:52.423-0400 I COMMAND [conn37] command db52.reindex_4 command: reIndex { reIndex: "reindex_4" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 433391 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 558ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.428-0400 m30999| 2015-07-09T14:14:52.428-0400 I NETWORK [conn330] end connection 127.0.0.1:63749 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.429-0400 m31100| 2015-07-09T14:14:52.428-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.430-0400 m31100| 2015-07-09T14:14:52.428-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.436-0400 m31100| 2015-07-09T14:14:52.436-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.436-0400 m31100| 2015-07-09T14:14:52.436-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.442-0400 m31100| 2015-07-09T14:14:52.441-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.442-0400 m31100| 2015-07-09T14:14:52.441-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.446-0400 m31100| 2015-07-09T14:14:52.446-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.446-0400 m31100| 2015-07-09T14:14:52.446-0400 I INDEX [conn15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.554-0400 m31100| 2015-07-09T14:14:52.554-0400 I INDEX [conn15] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.556-0400 m31100| 2015-07-09T14:14:52.555-0400 I COMMAND [conn15] command db52.reindex_10 command: reIndex { reIndex: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 418090 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 550ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.568-0400 m31100| 2015-07-09T14:14:52.568-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.568-0400 m31100| 2015-07-09T14:14:52.568-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.573-0400 m31100| 2015-07-09T14:14:52.572-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_0", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.573-0400 m31100| 2015-07-09T14:14:52.572-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.578-0400 m31100| 2015-07-09T14:14:52.577-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_0", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.579-0400 m31100| 2015-07-09T14:14:52.577-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.584-0400 m31100| 2015-07-09T14:14:52.584-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.584-0400 m31100| 2015-07-09T14:14:52.584-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.692-0400 m31100| 2015-07-09T14:14:52.691-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.693-0400 m31100| 2015-07-09T14:14:52.692-0400 I COMMAND [conn187] command db52.reindex_0 command: reIndex { reIndex: "reindex_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 408441 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 539ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.693-0400 m31100| 2015-07-09T14:14:52.693-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.697-0400 m31100| 2015-07-09T14:14:52.697-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.698-0400 m31100| 2015-07-09T14:14:52.697-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.704-0400 m31100| 2015-07-09T14:14:52.704-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.705-0400 m31100| 2015-07-09T14:14:52.704-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.709-0400 m31100| 2015-07-09T14:14:52.707-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.709-0400 m31100| 2015-07-09T14:14:52.708-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.714-0400 m31100| 2015-07-09T14:14:52.714-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.714-0400 m31100| 2015-07-09T14:14:52.714-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.820-0400 m31100| 2015-07-09T14:14:52.819-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.824-0400 m31100| 2015-07-09T14:14:52.823-0400 I COMMAND [conn32] command db52.reindex_9 command: reIndex { reIndex: "reindex_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 392609 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 523ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.825-0400 m31100| 2015-07-09T14:14:52.824-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.833-0400 m31100| 2015-07-09T14:14:52.832-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.833-0400 m31100| 2015-07-09T14:14:52.833-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.838-0400 m31100| 2015-07-09T14:14:52.836-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_0", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.838-0400 m31100| 2015-07-09T14:14:52.837-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.842-0400 m31100| 2015-07-09T14:14:52.842-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_0", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.842-0400 m31100| 2015-07-09T14:14:52.842-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.848-0400 m31100| 2015-07-09T14:14:52.848-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.848-0400 m31100| 2015-07-09T14:14:52.848-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.955-0400 m31100| 2015-07-09T14:14:52.954-0400 I INDEX [conn187] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.957-0400 m31100| 2015-07-09T14:14:52.955-0400 I COMMAND [conn187] command db52.reindex_0 command: reIndex { reIndex: "reindex_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 133199 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 261ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.957-0400 m31100| 2015-07-09T14:14:52.957-0400 I QUERY [conn42] getmore db52.reindex_1 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3402750885597 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 1934443 } }, Collection: { acquireCount: { r: 8 } } } 1966ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.965-0400 m31100| 2015-07-09T14:14:52.964-0400 I QUERY [conn134] getmore db52.reindex_11 query: { $text: { $search: "ipsum" } } cursorid:3449683039706 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 1911767 } }, Collection: { acquireCount: { r: 8 } } } 1990ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.971-0400 m31100| 2015-07-09T14:14:52.970-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.971-0400 m31100| 2015-07-09T14:14:52.970-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.977-0400 m31100| 2015-07-09T14:14:52.976-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.977-0400 m31100| 2015-07-09T14:14:52.977-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.981-0400 m31100| 2015-07-09T14:14:52.981-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.982-0400 m31100| 2015-07-09T14:14:52.981-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.986-0400 m31100| 2015-07-09T14:14:52.986-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:52.986-0400 m31100| 2015-07-09T14:14:52.986-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.098-0400 m31100| 2015-07-09T14:14:53.098-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.101-0400 m31100| 2015-07-09T14:14:53.100-0400 I COMMAND [conn32] command db52.reindex_9 command: reIndex { reIndex: "reindex_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 139916 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 275ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.102-0400 m31100| 2015-07-09T14:14:53.100-0400 I QUERY [conn141] getmore db52.reindex_8 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3419446875438 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 2078917 } }, Collection: { acquireCount: { r: 8 } } } 1835ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.103-0400 m31100| 2015-07-09T14:14:53.101-0400 I COMMAND [conn50] command db52.reindex_11 command: listIndexes { listIndexes: "reindex_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 129794 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.103-0400 m31100| 2015-07-09T14:14:53.103-0400 I COMMAND [conn132] CMD: reIndex db52.reindex_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.109-0400 m31100| 2015-07-09T14:14:53.105-0400 I QUERY [conn175] query db52.reindex_2 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3428590762182 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 8 }, timeAcquiringMicros: { r: 2079553 } }, Collection: { acquireCount: { r: 9 } } } 1839ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.109-0400 m31100| 2015-07-09T14:14:53.107-0400 I QUERY [conn138] getmore db52.reindex_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3393798658088 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 537224 } }, Collection: { acquireCount: { r: 8 } } } 285ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.110-0400 m31100| 2015-07-09T14:14:53.108-0400 I QUERY [conn148] getmore db52.reindex_5 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3444777376151 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 
270033 } }, Collection: { acquireCount: { r: 8 } } } 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.112-0400 m31100| 2015-07-09T14:14:53.110-0400 I QUERY [conn137] getmore db52.reindex_3 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3411942052446 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 938549 } }, Collection: { acquireCount: { r: 8 } } } 555ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.117-0400 m31100| 2015-07-09T14:14:53.116-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.118-0400 m31100| 2015-07-09T14:14:53.118-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.119-0400 m31100| 2015-07-09T14:14:53.118-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.123-0400 m31100| 2015-07-09T14:14:53.123-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_11", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.124-0400 m31100| 2015-07-09T14:14:53.123-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.130-0400 m31100| 2015-07-09T14:14:53.129-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_11", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.130-0400 m31100| 2015-07-09T14:14:53.129-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.134-0400 m31100| 2015-07-09T14:14:53.133-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.134-0400 m31100| 2015-07-09T14:14:53.134-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.247-0400 m31100| 2015-07-09T14:14:53.246-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.249-0400 m31100| 2015-07-09T14:14:53.247-0400 I COMMAND [conn132] command db52.reindex_11 command: reIndex { reIndex: "reindex_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 791 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.249-0400 m31100| 2015-07-09T14:14:53.247-0400 I QUERY [conn140] getmore db52.reindex_7 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3433367086753 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 1075270 } }, Collection: { acquireCount: { r: 8 } } } 692ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.250-0400 m31100| 2015-07-09T14:14:53.250-0400 I COMMAND [conn132] CMD: reIndex db52.reindex_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.264-0400 m31100| 2015-07-09T14:14:53.264-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.265-0400 m31100| 2015-07-09T14:14:53.264-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.269-0400 m31100| 2015-07-09T14:14:53.269-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_9", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.269-0400 m31100| 2015-07-09T14:14:53.269-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.273-0400 m31100| 2015-07-09T14:14:53.272-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_9", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.273-0400 m31100| 2015-07-09T14:14:53.273-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.277-0400 m31100| 2015-07-09T14:14:53.277-0400 I INDEX [conn32] build index on: db52.reindex_9 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.278-0400 m31100| 2015-07-09T14:14:53.277-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.379-0400 m31100| 2015-07-09T14:14:53.378-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.381-0400 m31100| 2015-07-09T14:14:53.380-0400 I COMMAND [conn32] command db52.reindex_9 command: reIndex { reIndex: "reindex_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 141190 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 263ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.393-0400 m31100| 2015-07-09T14:14:53.392-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.393-0400 m31100| 2015-07-09T14:14:53.393-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.398-0400 m31100| 2015-07-09T14:14:53.397-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_11", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.398-0400 m31100| 2015-07-09T14:14:53.397-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.401-0400 m31100| 2015-07-09T14:14:53.401-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_11", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.402-0400 m31100| 2015-07-09T14:14:53.401-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.407-0400 m31100| 2015-07-09T14:14:53.406-0400 I INDEX [conn132] build index on: db52.reindex_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_11" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.407-0400 m31100| 2015-07-09T14:14:53.406-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.533-0400 m31100| 2015-07-09T14:14:53.532-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.552-0400 m31100| 2015-07-09T14:14:53.533-0400 I COMMAND [conn132] command db52.reindex_11 command: reIndex { reIndex: "reindex_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 136034 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 283ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.553-0400 m31100| 2015-07-09T14:14:53.540-0400 I QUERY [conn135] getmore db52.reindex_10 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3406490909228 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 558468 } }, Collection: { acquireCount: { r: 8 } } } 440ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.555-0400 m31100| 2015-07-09T14:14:53.542-0400 I QUERY [conn176] query db52.reindex_3 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3410909021461 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 408021 } }, Collection: { acquireCount: { r: 9 } } } 293ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.556-0400 m31100| 2015-07-09T14:14:53.543-0400 I QUERY [conn185] query db52.reindex_7 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3432370762125 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 274453 } }, Collection: { acquireCount: { r: 9 } } } 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.556-0400 m31100| 2015-07-09T14:14:53.545-0400 I QUERY [conn50] query db52.reindex_5 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3445656781752 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 412333 } }, Collection: { acquireCount: { r: 9 } } } 296ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.556-0400 m31100| 2015-07-09T14:14:53.551-0400 I QUERY [conn86] getmore db52.reindex_0 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3415385969583 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 286780 } }, Collection: { acquireCount: { r: 8 } } } 171ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.557-0400 m31100| 
2015-07-09T14:14:53.552-0400 I QUERY [conn175] query db52.reindex_8 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3420596980299 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 425021 } }, Collection: { acquireCount: { r: 9 } } } 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.557-0400 m31100| 2015-07-09T14:14:53.554-0400 I QUERY [conn181] query db52.reindex_13 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3394504864079 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 420325 } }, Collection: { acquireCount: { r: 9 } } } 305ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.562-0400 m31100| 2015-07-09T14:14:53.557-0400 I QUERY [conn49] query db52.reindex_1 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3401855053085 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 555920 } }, Collection: { acquireCount: { r: 9 } } } 456ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.639-0400 m31100| 2015-07-09T14:14:53.636-0400 I QUERY [conn141] getmore db52.reindex_2 query: { $text: { $search: "ipsum" } } cursorid:3428590762182 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 406364 } }, Collection: { acquireCount: { r: 8 } } } 387ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.662-0400 m31100| 2015-07-09T14:14:53.661-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.681-0400 m31100| 2015-07-09T14:14:53.681-0400 I INDEX [conn187] build index on: db52.reindex_2 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.681-0400 m31100| 2015-07-09T14:14:53.681-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.688-0400 m31100| 2015-07-09T14:14:53.687-0400 I INDEX [conn187] build index on: db52.reindex_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_2", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.688-0400 m31100| 2015-07-09T14:14:53.688-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.693-0400 m31100| 2015-07-09T14:14:53.692-0400 I INDEX [conn187] build index on: db52.reindex_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_2", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.693-0400 m31100| 2015-07-09T14:14:53.692-0400 I 
INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.697-0400 m31100| 2015-07-09T14:14:53.697-0400 I INDEX [conn187] build index on: db52.reindex_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.698-0400 m31100| 2015-07-09T14:14:53.697-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.804-0400 m31100| 2015-07-09T14:14:53.803-0400 I INDEX [conn187] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.808-0400 m31100| 2015-07-09T14:14:53.804-0400 I COMMAND [conn187] command db52.reindex_2 command: reIndex { reIndex: "reindex_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 9850 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 143ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.858-0400 m31100| 2015-07-09T14:14:53.857-0400 I QUERY [conn140] getmore db52.reindex_3 query: { $text: { $search: "ipsum" } } cursorid:3410909021461 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 131577 } }, Collection: { acquireCount: { r: 10 } } } 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.864-0400 m31100| 2015-07-09T14:14:53.859-0400 I QUERY [conn137] getmore db52.reindex_7 query: { $text: { $search: "ipsum" } } cursorid:3432370762125 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 130478 } }, Collection: { acquireCount: { r: 9 } } } 294ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.872-0400 m31100| 2015-07-09T14:14:53.871-0400 I COMMAND [conn132] CMD: reIndex db52.reindex_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.875-0400 m31100| 2015-07-09T14:14:53.874-0400 I QUERY [conn148] getmore db52.reindex_13 query: { $text: { $search: "ipsum" } } cursorid:3394504864079 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 144844 } }, Collection: { acquireCount: { r: 10 } } } 298ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.875-0400 m31100| 2015-07-09T14:14:53.874-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.880-0400 m31100| 2015-07-09T14:14:53.879-0400 I QUERY [conn134] getmore db52.reindex_1 query: { $text: { $search: "ipsum" } } cursorid:3401855053085 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 160928 } }, Collection: { acquireCount: { r: 9 } } } 310ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.883-0400 m31100| 2015-07-09T14:14:53.881-0400 I QUERY [conn138] getmore db52.reindex_5 
query: { $text: { $search: "ipsum" } } cursorid:3445656781752 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 139208 } }, Collection: { acquireCount: { r: 11 } } } 315ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.891-0400 m31100| 2015-07-09T14:14:53.891-0400 I INDEX [conn132] build index on: db52.reindex_7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.891-0400 m31100| 2015-07-09T14:14:53.891-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.897-0400 m31100| 2015-07-09T14:14:53.896-0400 I INDEX [conn132] build index on: db52.reindex_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_7", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.897-0400 m31100| 2015-07-09T14:14:53.896-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.902-0400 m31100| 2015-07-09T14:14:53.901-0400 I INDEX [conn132] build index on: db52.reindex_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_7", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.902-0400 m31100| 2015-07-09T14:14:53.902-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.907-0400 m31100| 2015-07-09T14:14:53.906-0400 I INDEX [conn132] build index on: db52.reindex_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:53.907-0400 m31100| 2015-07-09T14:14:53.906-0400 I INDEX [conn132] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.026-0400 m31100| 2015-07-09T14:14:54.026-0400 I INDEX [conn132] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.027-0400 m31100| 2015-07-09T14:14:54.026-0400 I COMMAND [conn132] command db52.reindex_7 command: reIndex { reIndex: "reindex_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 11797 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 154ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.028-0400 m31100| 2015-07-09T14:14:54.027-0400 I COMMAND [conn50] command db52.reindex_13 command: listIndexes { listIndexes: "reindex_13" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 145917 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.028-0400 m31100| 2015-07-09T14:14:54.027-0400 I COMMAND [conn177] command db52.reindex_1 command: listIndexes { listIndexes: "reindex_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 137682 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.029-0400 m31100| 2015-07-09T14:14:54.027-0400 I COMMAND [conn49] command db52.reindex_5 command: listIndexes { listIndexes: "reindex_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 138041 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.029-0400 m31100| 2015-07-09T14:14:54.028-0400 I COMMAND [conn35] CMD: reIndex db52.reindex_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.029-0400 m31100| 2015-07-09T14:14:54.028-0400 I QUERY [conn135] getmore db52.reindex_0 query: { $text: { $search: "ipsum" } } cursorid:3414729103893 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 285776 } }, Collection: { acquireCount: { r: 9 } } } 390ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.029-0400 m31100| 2015-07-09T14:14:54.029-0400 I COMMAND [conn39] CMD: reIndex db52.reindex_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.051-0400 m31100| 2015-07-09T14:14:54.051-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.052-0400 m31100| 2015-07-09T14:14:54.051-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.057-0400 m31100| 2015-07-09T14:14:54.056-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.057-0400 m31100| 
2015-07-09T14:14:54.056-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.061-0400 m31100| 2015-07-09T14:14:54.060-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.062-0400 m31100| 2015-07-09T14:14:54.060-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.065-0400 m31100| 2015-07-09T14:14:54.065-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.066-0400 m31100| 2015-07-09T14:14:54.065-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.206-0400 m31100| 2015-07-09T14:14:54.205-0400 I INDEX [conn32] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.207-0400 m31100| 2015-07-09T14:14:54.206-0400 I COMMAND [conn32] command db52.reindex_3 command: reIndex { reIndex: "reindex_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 170945 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 331ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.207-0400 m31100| 2015-07-09T14:14:54.207-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.211-0400 m31100| 2015-07-09T14:14:54.211-0400 I INDEX [conn35] build index on: db52.reindex_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.212-0400 m31100| 2015-07-09T14:14:54.211-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.216-0400 m31100| 2015-07-09T14:14:54.216-0400 I INDEX [conn35] build index on: db52.reindex_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_1", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.217-0400 m31100| 2015-07-09T14:14:54.216-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.222-0400 m31100| 2015-07-09T14:14:54.221-0400 I INDEX [conn35] build index on: db52.reindex_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_1", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.222-0400 m31100| 2015-07-09T14:14:54.221-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.227-0400 m31100| 2015-07-09T14:14:54.226-0400 I INDEX [conn35] build index on: db52.reindex_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.227-0400 m31100| 2015-07-09T14:14:54.226-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.367-0400 m31100| 2015-07-09T14:14:54.366-0400 I INDEX [conn35] build index done. 
scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.375-0400 m31100| 2015-07-09T14:14:54.367-0400 I COMMAND [conn35] command db52.reindex_1 command: reIndex { reIndex: "reindex_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 177889 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 339ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.379-0400 m31100| 2015-07-09T14:14:54.377-0400 I QUERY [conn139] getmore db52.reindex_11 query: { $text: { $search: "ipsum" } } cursorid:3450537040223 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 609466 } }, Collection: { acquireCount: { r: 8 } } } 572ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.380-0400 m31100| 2015-07-09T14:14:54.380-0400 I QUERY [conn86] getmore db52.reindex_8 query: { $text: { $search: "ipsum" } } cursorid:3420596980299 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 620929 } }, Collection: { acquireCount: { r: 10 } } } 807ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.386-0400 m31100| 2015-07-09T14:14:54.385-0400 I COMMAND [conn175] command db52.reindex_0 command: listIndexes { listIndexes: "reindex_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 330271 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 348ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.391-0400 m31100| 2015-07-09T14:14:54.386-0400 I QUERY [conn149] getmore db52.reindex_10 query: { $text: { $search: "ipsum" } } cursorid:3407774705089 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 608146 } }, Collection: { acquireCount: { r: 10 } } } 766ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.391-0400 m31100| 2015-07-09T14:14:54.390-0400 I COMMAND [conn187] CMD: reIndex db52.reindex_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.394-0400 m31100| 2015-07-09T14:14:54.393-0400 I INDEX [conn39] build index on: db52.reindex_5 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.394-0400 m31100| 2015-07-09T14:14:54.393-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.398-0400 m31100| 2015-07-09T14:14:54.398-0400 I INDEX [conn39] build index on: db52.reindex_5 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_5", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.398-0400 m31100| 2015-07-09T14:14:54.398-0400 I INDEX [conn39] building index using bulk method 
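[editor's note] The block below is an illustrative mongo shell sketch, not part of the captured log: it approximates the reindex FSM workload producing the surrounding entries, using only the collection, index, and query shapes visible above (the actual workload is defined under jstests/concurrency/fsm_workloads and runs these operations from many threads at once, which is why the reIndex commands, holding the db52 Database lock in W mode, and the $text/$geoWithin reads, waiting on it in r mode, both report long timeAcquiringMicros values).

// Illustrative sketch only -- shapes taken from the log entries above.
var coll = db.getSiblingDB('db52').getCollection('reindex_0');
coll.createIndex({ text: 'text' });     // logged as index "text_text"
coll.createIndex({ geo: '2dsphere' });  // logged as index "geo_2dsphere"
coll.createIndex({ integer: 1 });       // logged as index "integer_1"
// Reader side: the two query shapes seen in the QUERY/getmore entries.
coll.find({ $text: { $search: 'ipsum' } }).itcount();
coll.find({ geo: { $geoWithin: { $geometry: {
    type: 'Polygon',
    coordinates: [ [ [ -26, -26 ], [ -26, 26 ], [ 26, 26 ], [ 26, -26 ], [ -26, -26 ] ] ]
} } } }).itcount();
// Writer side: rebuild all four indexes; logged as "CMD: reIndex db52.reindex_0"
// followed by one "build index on:" / "building index using bulk method" pair per index.
assert.commandWorked(coll.runCommand({ reIndex: coll.getName() }));

[end editor's note]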
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.404-0400 m31100| 2015-07-09T14:14:54.403-0400 I INDEX [conn39] build index on: db52.reindex_5 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_5", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.404-0400 m31100| 2015-07-09T14:14:54.403-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.409-0400 m31100| 2015-07-09T14:14:54.409-0400 I INDEX [conn39] build index on: db52.reindex_5 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.409-0400 m31100| 2015-07-09T14:14:54.409-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.520-0400 m31100| 2015-07-09T14:14:54.520-0400 I INDEX [conn39] build index done. scanned 1000 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.701-0400 m31100| 2015-07-09T14:14:54.521-0400 I COMMAND [conn39] command db52.reindex_5 command: reIndex { reIndex: "reindex_5" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 357606 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 491ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.701-0400 m31100| 2015-07-09T14:14:54.526-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.702-0400 m31100| 2015-07-09T14:14:54.526-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.702-0400 m31100| 2015-07-09T14:14:54.530-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.702-0400 m31100| 2015-07-09T14:14:54.530-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.703-0400 m31100| 2015-07-09T14:14:54.537-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.704-0400 m31100| 2015-07-09T14:14:54.537-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.704-0400 m31100| 2015-07-09T14:14:54.545-0400 I INDEX [conn32] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.704-0400 m31100| 2015-07-09T14:14:54.545-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.704-0400 m31100| 2015-07-09T14:14:54.678-0400 I INDEX [conn32] build index done. scanned 1000 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.704-0400 m31100| 2015-07-09T14:14:54.680-0400 I COMMAND [conn32] command db52.reindex_3 command: reIndex { reIndex: "reindex_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 313790 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 472ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.705-0400 m31100| 2015-07-09T14:14:54.681-0400 I COMMAND [conn175] command db52.reindex_8 command: listIndexes { listIndexes: "reindex_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 285616 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 285ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.705-0400 m31100| 2015-07-09T14:14:54.682-0400 I COMMAND [conn181] command db52.reindex_11 command: listIndexes { listIndexes: "reindex_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 291212 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 291ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.706-0400 m31100| 2015-07-09T14:14:54.683-0400 I QUERY [conn42] getmore db52.reindex_9 query: { $text: { $search: "ipsum" } } cursorid:3436501331833 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 4 }, timeAcquiringMicros: { r: 913335 } }, Collection: { acquireCount: { r: 8 } } } 878ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.706-0400 m31100| 2015-07-09T14:14:54.685-0400 I COMMAND [conn45] command db52.reindex_10 command: listIndexes { listIndexes: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 288830 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 288ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.706-0400 m31100| 2015-07-09T14:14:54.685-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_11 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.706-0400 m31100| 2015-07-09T14:14:54.687-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_10 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.707-0400 m31100| 2015-07-09T14:14:54.693-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.707-0400 m31100| 2015-07-09T14:14:54.693-0400 I INDEX [conn187] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.707-0400 m31100| 2015-07-09T14:14:54.698-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_0", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.708-0400 m31100| 
2015-07-09T14:14:54.698-0400 I INDEX [conn187] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.708-0400 m31100| 2015-07-09T14:14:54.704-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_0", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.708-0400 m31100| 2015-07-09T14:14:54.704-0400 I INDEX [conn187] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.708-0400 m31100| 2015-07-09T14:14:54.707-0400 I INDEX [conn187] build index on: db52.reindex_0 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_0" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.709-0400 m31100| 2015-07-09T14:14:54.707-0400 I INDEX [conn187] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.812-0400 m31100| 2015-07-09T14:14:54.812-0400 I INDEX [conn187] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.815-0400 m31100| 2015-07-09T14:14:54.813-0400 I COMMAND [conn187] command db52.reindex_0 command: reIndex { reIndex: "reindex_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 297462 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 422ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.815-0400 m31100| 2015-07-09T14:14:54.815-0400 I COMMAND [conn49] command db52.reindex_9 command: listIndexes { listIndexes: "reindex_9" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 119741 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.825-0400 m31100| 2015-07-09T14:14:54.824-0400 I INDEX [conn32] build index on: db52.reindex_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_11" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.825-0400 m31100| 2015-07-09T14:14:54.824-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.829-0400 m31100| 2015-07-09T14:14:54.828-0400 I INDEX [conn32] build index on: db52.reindex_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_11", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.830-0400 m31100| 2015-07-09T14:14:54.828-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.835-0400 m31100| 2015-07-09T14:14:54.834-0400 I INDEX [conn32] build index on: db52.reindex_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_11", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.835-0400 m31100| 2015-07-09T14:14:54.834-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.839-0400 m31100| 2015-07-09T14:14:54.839-0400 I INDEX [conn32] build index on: db52.reindex_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_11" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.839-0400 m31100| 2015-07-09T14:14:54.839-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.958-0400 m31100| 2015-07-09T14:14:54.957-0400 I INDEX [conn32] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.958-0400 m31100| 2015-07-09T14:14:54.958-0400 I COMMAND [conn32] command db52.reindex_11 command: reIndex { reIndex: "reindex_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 131986 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 272ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.965-0400 m31100| 2015-07-09T14:14:54.964-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_10" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.965-0400 m31100| 2015-07-09T14:14:54.965-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.971-0400 m31100| 2015-07-09T14:14:54.969-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_10", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.971-0400 m31100| 2015-07-09T14:14:54.970-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.976-0400 m31100| 2015-07-09T14:14:54.975-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_10", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.976-0400 m31100| 2015-07-09T14:14:54.975-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.981-0400 m31100| 2015-07-09T14:14:54.981-0400 I INDEX [conn15] build index on: db52.reindex_10 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_10" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:54.981-0400 m31100| 2015-07-09T14:14:54.981-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.092-0400 m31100| 2015-07-09T14:14:55.092-0400 I INDEX [conn15] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.098-0400 m31100| 2015-07-09T14:14:55.092-0400 I COMMAND [conn15] command db52.reindex_10 command: reIndex { reIndex: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 271067 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 405ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.106-0400 m31100| 2015-07-09T14:14:55.105-0400 I QUERY [conn139] getmore db52.reindex_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3394463048976 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 401914 } }, Collection: { acquireCount: { r: 8 } } } 290ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.116-0400 m31100| 2015-07-09T14:14:55.112-0400 I QUERY [conn42] getmore db52.reindex_7 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3433326375574 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 403037 } }, Collection: { acquireCount: { r: 8 } } } 298ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.123-0400 m31100| 2015-07-09T14:14:55.120-0400 I QUERY [conn141] getmore db52.reindex_2 query: { $text: { $search: "ipsum" } } cursorid:3427697460326 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 1185476 } }, Collection: { acquireCount: { r: 8 } } } 1270ms
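The interleaved entries above are several FSM worker threads rebuilding the same four indexes over and over: each reIndex pass rebuilds _id_, text_text, geo_2dsphere and integer_1 on one of the reindex_N collections. A minimal shell sketch of the workload step being exercised, reconstructed from the index properties in the log (the collection choice and field names are assumptions taken from those properties):

    // Hypothetical reconstruction of one reindex.js workload iteration; the key
    // patterns come from the "build index on:" entries above.
    var workloadDB = db.getSiblingDB('db52');
    workloadDB.reindex_0.createIndex({ text: 'text' });     // logged as "text_text"
    workloadDB.reindex_0.createIndex({ geo: '2dsphere' });  // logged as "geo_2dsphere"
    workloadDB.reindex_0.createIndex({ integer: 1 });       // logged as "integer_1"
    // reIndex drops and rebuilds every index, including _id_, which is why each
    // pass logs four "build index on:" / "building index using bulk method" pairs:
    workloadDB.runCommand({ reIndex: 'reindex_0' });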
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.284-0400 m31100| 2015-07-09T14:14:55.283-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.300-0400 m31100| 2015-07-09T14:14:55.299-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_1" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.300-0400 m31100| 2015-07-09T14:14:55.299-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.305-0400 m31100| 2015-07-09T14:14:55.305-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_1", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.305-0400 m31100| 2015-07-09T14:14:55.305-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.310-0400 m31100| 2015-07-09T14:14:55.310-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_1", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.310-0400 m31100| 2015-07-09T14:14:55.310-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.315-0400 m31100| 2015-07-09T14:14:55.314-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_1" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.315-0400 m31100| 2015-07-09T14:14:55.314-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.423-0400 m31100| 2015-07-09T14:14:55.422-0400 I INDEX [conn32] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.429-0400 m31100| 2015-07-09T14:14:55.428-0400 I COMMAND [conn32] command db52.reindex_1 command: reIndex { reIndex: "reindex_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 8810 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 144ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.433-0400 m31100| 2015-07-09T14:14:55.433-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.441-0400 m31100| 2015-07-09T14:14:55.440-0400 I QUERY [conn42] getmore db52.reindex_7 query: { $text: { $search: "ipsum" } } cursorid:3433760063051 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 136191 } }, Collection: { acquireCount: { r: 8 } } } 265ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.451-0400 m31100| 2015-07-09T14:14:55.450-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_1" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.451-0400 m31100| 2015-07-09T14:14:55.450-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.457-0400 m31100| 2015-07-09T14:14:55.455-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_1", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.457-0400 m31100| 2015-07-09T14:14:55.455-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.461-0400 m31100| 2015-07-09T14:14:55.461-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_1", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.462-0400 m31100| 2015-07-09T14:14:55.461-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.466-0400 m31100| 2015-07-09T14:14:55.466-0400 I INDEX [conn32] build index on: db52.reindex_1 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_1" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.466-0400 m31100| 2015-07-09T14:14:55.466-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.584-0400 m31100| 2015-07-09T14:14:55.584-0400 I INDEX [conn32] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.593-0400 m31100| 2015-07-09T14:14:55.592-0400 I QUERY [conn148] getmore db52.reindex_13 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3394642025385 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 141532 } }, Collection: { acquireCount: { r: 8 } } } 157ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.593-0400 m31100| 2015-07-09T14:14:55.593-0400 I COMMAND [conn49] command db52.reindex_7 command: listIndexes { listIndexes: "reindex_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 139864 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 145ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.593-0400 m31100| 2015-07-09T14:14:55.593-0400 I COMMAND [conn32] command db52.reindex_1 command: reIndex { reIndex: "reindex_1" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 5877 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 160ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.594-0400 m31100| 2015-07-09T14:14:55.594-0400 I COMMAND [conn32] CMD: reIndex db52.reindex_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.598-0400 m31100| 2015-07-09T14:14:55.597-0400 I QUERY [conn135] getmore db52.reindex_10 query: { $text: { $search: "ipsum" } } cursorid:3406886611485 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 276854 } }, Collection: { acquireCount: { r: 8 } } } 343ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.601-0400 m31100| 2015-07-09T14:14:55.599-0400 I QUERY [conn141] getmore db52.reindex_0 query: { $text: { $search: "ipsum" } } cursorid:3415929632427 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 281564 } }, Collection: { acquireCount: { r: 8 } } } 347ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.602-0400 m31100| 2015-07-09T14:14:55.601-0400 I QUERY [conn134] getmore db52.reindex_11 query: { $text: { $search: "ipsum" } } cursorid:3449875056057 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 276900 } }, Collection: { acquireCount: { r: 8 } } } 394ms
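The $text getmore entries are those same threads draining 899-document query cursors against the rebuilt text index. A sketch of the query they appear to issue (the search term "ipsum" is taken from the log; itcount() is just one way to exhaust the cursor):

    // Needs the "text_text" index from the workload; every batch after the
    // first surfaces in the server log as a "getmore" entry like those above.
    db.getSiblingDB('db52').reindex_2.find({ $text: { $search: 'ipsum' } }).itcount();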
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.609-0400 m31100| 2015-07-09T14:14:55.608-0400 I QUERY [conn149] getmore db52.reindex_2 query: { $text: { $search: "ipsum" } } cursorid:3428631521445 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 287674 } }, Collection: { acquireCount: { r: 8 } } } 377ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.616-0400 m31100| 2015-07-09T14:14:55.616-0400 I INDEX [conn32] build index on: db52.reindex_7 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.616-0400 m31100| 2015-07-09T14:14:55.616-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.621-0400 m31100| 2015-07-09T14:14:55.620-0400 I INDEX [conn32] build index on: db52.reindex_7 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_7", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.621-0400 m31100| 2015-07-09T14:14:55.620-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.625-0400 m31100| 2015-07-09T14:14:55.624-0400 I INDEX [conn32] build index on: db52.reindex_7 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_7", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.625-0400 m31100| 2015-07-09T14:14:55.624-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.629-0400 m31100| 2015-07-09T14:14:55.628-0400 I INDEX [conn32] build index on: db52.reindex_7 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_7" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.629-0400 m31100| 2015-07-09T14:14:55.628-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.741-0400 m31100| 2015-07-09T14:14:55.741-0400 I INDEX [conn32] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.749-0400 m31100| 2015-07-09T14:14:55.742-0400 I COMMAND [conn179] command db52.reindex_10 command: listIndexes { listIndexes: "reindex_10" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 132544 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 134ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.749-0400 m31100| 2015-07-09T14:14:55.742-0400 I COMMAND [conn185] command db52.reindex_11 command: listIndexes { listIndexes: "reindex_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:643 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 132539 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 133ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.750-0400 m31100| 2015-07-09T14:14:55.743-0400 I COMMAND [conn45] command db52.reindex_0 command: listIndexes { listIndexes: "reindex_0" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 132258 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 133ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.750-0400 m31100| 2015-07-09T14:14:55.744-0400 I COMMAND [conn39] CMD: reIndex db52.reindex_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.750-0400 m31100| 2015-07-09T14:14:55.744-0400 I COMMAND [conn32] command db52.reindex_7 command: reIndex { reIndex: "reindex_7" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 7026 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 150ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.750-0400 m31100| 2015-07-09T14:14:55.748-0400 I COMMAND [conn56] command db52.reindex_2 command: listIndexes { listIndexes: "reindex_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 125615 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 131ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.752-0400 m31100| 2015-07-09T14:14:55.751-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.754-0400 m31100| 2015-07-09T14:14:55.753-0400 I QUERY [conn137] getmore db52.reindex_3 query: { $text: { $search: "ipsum" } } cursorid:3412338374183 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 418598 } }, Collection: { acquireCount: { r: 8 } } } 535ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.756-0400 m30999| 2015-07-09T14:14:55.756-0400 I NETWORK [conn333] end connection 127.0.0.1:63755 (4 connections now open)
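The listIndexes commands mixed in here look like the workload verifying that all four indexes survived each rebuild; the shell equivalent is:

    // getIndexes() wraps the listIndexes command recorded above:
    db.getSiblingDB('db52').reindex_7.getIndexes();
    // or, issuing the command directly:
    db.getSiblingDB('db52').runCommand({ listIndexes: 'reindex_7' });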
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.759-0400 m31100| 2015-07-09T14:14:55.759-0400 I INDEX [conn39] build index on: db52.reindex_11 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_11" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.761-0400 m31100| 2015-07-09T14:14:55.759-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.766-0400 m31100| 2015-07-09T14:14:55.765-0400 I INDEX [conn39] build index on: db52.reindex_11 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_11", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.766-0400 m31100| 2015-07-09T14:14:55.765-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.770-0400 m31100| 2015-07-09T14:14:55.769-0400 I INDEX [conn39] build index on: db52.reindex_11 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_11", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.770-0400 m31100| 2015-07-09T14:14:55.769-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.775-0400 m31100| 2015-07-09T14:14:55.774-0400 I INDEX [conn39] build index on: db52.reindex_11 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_11" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.775-0400 m31100| 2015-07-09T14:14:55.775-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.886-0400 m31100| 2015-07-09T14:14:55.885-0400 I INDEX [conn39] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.889-0400 m31100| 2015-07-09T14:14:55.886-0400 I COMMAND [conn39] command db52.reindex_11 command: reIndex { reIndex: "reindex_11" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:592 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 9824 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 142ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.889-0400 m31100| 2015-07-09T14:14:55.888-0400 I COMMAND [conn177] command db52.reindex_3 command: listIndexes { listIndexes: "reindex_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 123700 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 123ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.898-0400 m31100| 2015-07-09T14:14:55.897-0400 I COMMAND [conn39] CMD: reIndex db52.reindex_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.908-0400 m31100| 2015-07-09T14:14:55.908-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.908-0400 m31100| 2015-07-09T14:14:55.908-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.908-0400 m30998| 2015-07-09T14:14:55.908-0400 I NETWORK [conn333] end connection 127.0.0.1:63759 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.913-0400 m31100| 2015-07-09T14:14:55.912-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_2", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.913-0400 m31100| 2015-07-09T14:14:55.912-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.919-0400 m31100| 2015-07-09T14:14:55.918-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_2", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.919-0400 m31100| 2015-07-09T14:14:55.919-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.925-0400 m31100| 2015-07-09T14:14:55.924-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:55.925-0400 m31100| 2015-07-09T14:14:55.924-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.030-0400 m31100| 2015-07-09T14:14:56.029-0400 I INDEX [conn15] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.030-0400 m31100| 2015-07-09T14:14:56.030-0400 I COMMAND [conn15] command db52.reindex_2 command: reIndex { reIndex: "reindex_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 148829 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 278ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.034-0400 m31100| 2015-07-09T14:14:56.033-0400 I QUERY [conn86] getmore db52.reindex_8 query: { $text: { $search: "ipsum" } } cursorid:3419834467708 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 681151 } }, Collection: { acquireCount: { r: 9 } } } 804ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.034-0400 m31100| 2015-07-09T14:14:56.034-0400 I COMMAND [conn15] CMD: reIndex db52.reindex_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.045-0400 m31100| 2015-07-09T14:14:56.045-0400 I INDEX [conn39] build index on: db52.reindex_3 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.045-0400 m31100| 2015-07-09T14:14:56.045-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.051-0400 m31100| 2015-07-09T14:14:56.049-0400 I INDEX [conn39] build index on: db52.reindex_3 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_3", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.051-0400 m31100| 2015-07-09T14:14:56.050-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.055-0400 m31100| 2015-07-09T14:14:56.054-0400 I INDEX [conn39] build index on: db52.reindex_3 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_3", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.056-0400 m31100| 2015-07-09T14:14:56.055-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.060-0400 m31100| 2015-07-09T14:14:56.059-0400 I INDEX [conn39] build index on: db52.reindex_3 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_3" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.060-0400 m31100| 2015-07-09T14:14:56.059-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.171-0400 m31100| 2015-07-09T14:14:56.170-0400 I INDEX [conn39] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.173-0400 m31100| 2015-07-09T14:14:56.171-0400 I COMMAND [conn39] command db52.reindex_3 command: reIndex { reIndex: "reindex_3" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 138834 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 273ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.173-0400 m31100| 2015-07-09T14:14:56.173-0400 I COMMAND [conn45] command db52.reindex_8 command: listIndexes { listIndexes: "reindex_8" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:638 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 126326 } }, Collection: { acquireCount: { r: 1 } } } protocol:op_command 126ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.187-0400 m30998| 2015-07-09T14:14:56.185-0400 I NETWORK [conn330] end connection 127.0.0.1:63753 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.187-0400 m31100| 2015-07-09T14:14:56.187-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db52.reindex_2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.187-0400 m31100| 2015-07-09T14:14:56.187-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.191-0400 m31100| 2015-07-09T14:14:56.190-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "text_text", ns: "db52.reindex_2", weights: { text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.191-0400 m31100| 2015-07-09T14:14:56.190-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.197-0400 m31100| 2015-07-09T14:14:56.195-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { geo: "2dsphere" }, name: "geo_2dsphere", ns: "db52.reindex_2", 2dsphereIndexVersion: 2 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.197-0400 m31100| 2015-07-09T14:14:56.195-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.201-0400 m31100| 2015-07-09T14:14:56.200-0400 I INDEX [conn15] build index on: db52.reindex_2 properties: { v: 1, key: { integer: 1.0 }, name: "integer_1", ns: "db52.reindex_2" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.201-0400 m31100| 2015-07-09T14:14:56.200-0400 I INDEX [conn15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.310-0400 m31100| 2015-07-09T14:14:56.310-0400 I INDEX [conn15] build index done. scanned 1000 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.317-0400 m31100| 2015-07-09T14:14:56.311-0400 I COMMAND [conn15] command db52.reindex_2 command: reIndex { reIndex: "reindex_2" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:588 locks:{ Global: { acquireCount: { r: 1, w: 1 } }, Database: { acquireCount: { W: 1 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 146807 } }, Metadata: { acquireCount: { W: 1 } } } protocol:op_query 277ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.322-0400 m31100| 2015-07-09T14:14:56.316-0400 I QUERY [conn176] query db52.reindex_5 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3445848544219 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 690917 } }, Collection: { acquireCount: { r: 9 } } } 574ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.323-0400 m31100| 2015-07-09T14:14:56.319-0400 I QUERY [conn50] query db52.reindex_13 query: { $text: { $search: "ipsum" } } planSummary: IXSCAN { _fts: "text", _ftsx: 1 } cursorid:3393929123106 ntoreturn:0 ntoskip:0 nscanned:1000 nscannedObjects:101 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:101 reslen:24159 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 694328 } }, Collection: { acquireCount: { r: 9 } } } 575ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.325-0400 m31100| 2015-07-09T14:14:56.323-0400 I QUERY [conn86] getmore db52.reindex_0 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3416357246151 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 274028 } }, Collection: { acquireCount: { r: 8 } } } 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.326-0400 m31100| 2015-07-09T14:14:56.324-0400 I QUERY [conn137] getmore db52.reindex_1 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3402003238592 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 409881 } }, Collection: { acquireCount: { r: 8 } } } 293ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.334-0400 m31100| 2015-07-09T14:14:56.332-0400 I QUERY [conn139] getmore db52.reindex_9 query: { $text: { $search: "ipsum" } } cursorid:3437223187298 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 7 }, timeAcquiringMicros: { r: 954224 } }, Collection: { acquireCount: { r: 8 } } } 902ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.336-0400 m31100| 2015-07-09T14:14:56.336-0400 I QUERY [conn134] getmore db52.reindex_7 query: { geo: { $geoWithin: { $geometry: { type: "Polygon", coordinates: [ [ [ -26.0, -26.0 ], [ -26.0, 26.0 ], [ 26.0, 26.0 ], [ 26.0, -26.0 ], [ -26.0, -26.0 ] ] ] } } } } cursorid:3432114866757 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 274931 } }, Collection: { acquireCount: { r: 8 } } } 164ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.400-0400 m30998| 2015-07-09T14:14:56.400-0400 I NETWORK [conn332] end connection 127.0.0.1:63758 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.463-0400 m31100| 2015-07-09T14:14:56.460-0400 I QUERY [conn86] getmore db52.reindex_2 query: { $text: { $search: "ipsum" } } cursorid:3429406330891 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.474-0400 m31100| 2015-07-09T14:14:56.474-0400 I QUERY [conn138] getmore db52.reindex_5 query: { $text: { $search: "ipsum" } } cursorid:3445848544219 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 146ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.477-0400 m31100| 2015-07-09T14:14:56.477-0400 I QUERY [conn137] getmore db52.reindex_13 query: { $text: { $search: "ipsum" } } cursorid:3393929123106 ntoreturn:0 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:899 reslen:214881 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 138ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.506-0400 m30998| 2015-07-09T14:14:56.505-0400 I NETWORK [conn327] end connection 127.0.0.1:63747 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.515-0400 m30999| 2015-07-09T14:14:56.515-0400 I NETWORK [conn328] end connection 127.0.0.1:63746 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.528-0400 m30998| 2015-07-09T14:14:56.526-0400 I NETWORK [conn328] end connection 127.0.0.1:63750 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.558-0400 m30998| 2015-07-09T14:14:56.558-0400 I NETWORK [conn329] end connection 127.0.0.1:63752 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.574-0400 m30999| 2015-07-09T14:14:56.573-0400 I NETWORK [conn329] end connection 127.0.0.1:63748 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.595-0400 m30999| 2015-07-09T14:14:56.591-0400 I NETWORK [conn334] end connection 127.0.0.1:63756 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.685-0400 m30998| 2015-07-09T14:14:56.685-0400 I NETWORK [conn331] end connection 127.0.0.1:63757 (1 connection now open)
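The geo getmores come from a polygon-containment query against the 2dsphere index; the ring below is copied verbatim from the log entries (the collection choice is arbitrary):

    db.getSiblingDB('db52').reindex_0.find({
      geo: { $geoWithin: { $geometry: {
          type: 'Polygon',
          coordinates: [ [ [ -26, -26 ], [ -26, 26 ], [ 26, 26 ], [ 26, -26 ], [ -26, -26 ] ] ]
      } } }
    }).itcount();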
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.707-0400 m30999| 2015-07-09T14:14:56.707-0400 I COMMAND [conn1] DROP: db52.reindex_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.707-0400 m30999| 2015-07-09T14:14:56.707-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.707-0400 m31100| 2015-07-09T14:14:56.707-0400 I COMMAND [conn45] CMD: drop db52.reindex_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.711-0400 m30999| 2015-07-09T14:14:56.711-0400 I COMMAND [conn1] DROP: db52.reindex_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.711-0400 m30999| 2015-07-09T14:14:56.711-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.712-0400 m31100| 2015-07-09T14:14:56.712-0400 I COMMAND [conn45] CMD: drop db52.reindex_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.712-0400 m31101| 2015-07-09T14:14:56.712-0400 I COMMAND [repl writer worker 13] CMD: drop db52.reindex_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.713-0400 m31102| 2015-07-09T14:14:56.712-0400 I COMMAND [repl writer worker 5] CMD: drop db52.reindex_0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.715-0400 m30999| 2015-07-09T14:14:56.714-0400 I COMMAND [conn1] DROP: db52.reindex_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.715-0400 m30999| 2015-07-09T14:14:56.714-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.715-0400 m31100| 2015-07-09T14:14:56.715-0400 I COMMAND [conn45] CMD: drop db52.reindex_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.719-0400 m31102| 2015-07-09T14:14:56.718-0400 I COMMAND [repl writer worker 11] CMD: drop db52.reindex_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.719-0400 m30999| 2015-07-09T14:14:56.719-0400 I COMMAND [conn1] DROP: db52.reindex_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.719-0400 m30999| 2015-07-09T14:14:56.719-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.719-0400 m31100| 2015-07-09T14:14:56.719-0400 I COMMAND [conn45] CMD: drop db52.reindex_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.721-0400 m30999| 2015-07-09T14:14:56.721-0400 I COMMAND [conn1] DROP: db52.reindex_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.722-0400 m30999| 2015-07-09T14:14:56.721-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.722-0400 m31101| 2015-07-09T14:14:56.721-0400 I COMMAND [repl writer worker 0] CMD: drop db52.reindex_1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.722-0400 m31100| 2015-07-09T14:14:56.721-0400 I COMMAND [conn45] CMD: drop db52.reindex_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.723-0400 m31102| 2015-07-09T14:14:56.723-0400 I COMMAND [repl writer worker 2] CMD: drop db52.reindex_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.724-0400 m31101| 2015-07-09T14:14:56.724-0400 I COMMAND [repl writer worker 4] CMD: drop db52.reindex_10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.725-0400 m30999| 2015-07-09T14:14:56.725-0400 I COMMAND [conn1] DROP: db52.reindex_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.725-0400 m30999| 2015-07-09T14:14:56.725-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.725-0400 m31100| 2015-07-09T14:14:56.725-0400 I COMMAND [conn45] CMD: drop db52.reindex_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.726-0400 m31102| 2015-07-09T14:14:56.726-0400 I COMMAND [repl writer worker 0] CMD: drop db52.reindex_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.727-0400 m31101| 2015-07-09T14:14:56.727-0400 I COMMAND [repl writer worker 15] CMD: drop db52.reindex_11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.728-0400 m30999| 2015-07-09T14:14:56.727-0400 I COMMAND [conn1] DROP: db52.reindex_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.728-0400 m30999| 2015-07-09T14:14:56.728-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.728-0400 m31100| 2015-07-09T14:14:56.728-0400 I COMMAND [conn45] CMD: drop db52.reindex_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.729-0400 m31102| 2015-07-09T14:14:56.729-0400 I COMMAND [repl writer worker 8] CMD: drop db52.reindex_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.730-0400 m31101| 2015-07-09T14:14:56.730-0400 I COMMAND [repl writer worker 9] CMD: drop db52.reindex_12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.731-0400 m30999| 2015-07-09T14:14:56.730-0400 I COMMAND [conn1] DROP: db52.reindex_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.731-0400 m30999| 2015-07-09T14:14:56.730-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.731-0400 m31100| 2015-07-09T14:14:56.730-0400 I COMMAND [conn45] CMD: drop db52.reindex_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.732-0400 m31102| 2015-07-09T14:14:56.731-0400 I COMMAND [repl writer worker 12] CMD: drop db52.reindex_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.734-0400 m31101| 2015-07-09T14:14:56.733-0400 I COMMAND [repl writer worker 8] CMD: drop db52.reindex_13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.734-0400 m30999| 2015-07-09T14:14:56.734-0400 I COMMAND [conn1] DROP: db52.reindex_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.734-0400 m30999| 2015-07-09T14:14:56.734-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.735-0400 m31100| 2015-07-09T14:14:56.734-0400 I COMMAND [conn45] CMD: drop db52.reindex_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.735-0400 m31102| 2015-07-09T14:14:56.735-0400 I COMMAND [repl writer worker 1] CMD: drop db52.reindex_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.737-0400 m30999| 2015-07-09T14:14:56.737-0400 I COMMAND [conn1] DROP: db52.reindex_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.737-0400 m30999| 2015-07-09T14:14:56.737-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.737-0400 m31100| 2015-07-09T14:14:56.737-0400 I COMMAND [conn45] CMD: drop db52.reindex_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.738-0400 m31101| 2015-07-09T14:14:56.737-0400 I COMMAND [repl writer worker 14] CMD: drop db52.reindex_14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.738-0400 m31102| 2015-07-09T14:14:56.737-0400 I COMMAND [repl writer worker 7] CMD: drop db52.reindex_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.740-0400 m31101| 2015-07-09T14:14:56.740-0400 I COMMAND [repl writer worker 2] CMD: drop db52.reindex_2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.740-0400 m31102| 2015-07-09T14:14:56.740-0400 I COMMAND [repl writer worker 10] CMD: drop db52.reindex_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.740-0400 m30999| 2015-07-09T14:14:56.740-0400 I COMMAND [conn1] DROP: db52.reindex_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.741-0400 m30999| 2015-07-09T14:14:56.740-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.741-0400 m31100| 2015-07-09T14:14:56.740-0400 I COMMAND [conn45] CMD: drop db52.reindex_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.743-0400 m31102| 2015-07-09T14:14:56.743-0400 I COMMAND [repl writer worker 15] CMD: drop db52.reindex_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.743-0400 m31101| 2015-07-09T14:14:56.742-0400 I COMMAND [repl writer worker 10] CMD: drop db52.reindex_3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.743-0400 m30999| 2015-07-09T14:14:56.743-0400 I COMMAND [conn1] DROP: db52.reindex_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.743-0400 m30999| 2015-07-09T14:14:56.743-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.743-0400 m31100| 2015-07-09T14:14:56.743-0400 I COMMAND [conn45] CMD: drop db52.reindex_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.745-0400 m31101| 2015-07-09T14:14:56.744-0400 I COMMAND [repl writer worker 11] CMD: drop db52.reindex_4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.746-0400 m30999| 2015-07-09T14:14:56.745-0400 I COMMAND [conn1] DROP: db52.reindex_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.746-0400 m30999| 2015-07-09T14:14:56.745-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.746-0400 m31100| 2015-07-09T14:14:56.746-0400 I COMMAND [conn45] CMD: drop db52.reindex_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.746-0400 m31102| 2015-07-09T14:14:56.746-0400 I COMMAND [repl writer worker 9] CMD: drop db52.reindex_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.747-0400 m31101| 2015-07-09T14:14:56.747-0400 I COMMAND [repl writer worker 6] CMD: drop db52.reindex_5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.749-0400 m30999| 2015-07-09T14:14:56.749-0400 I COMMAND [conn1] DROP: db52.reindex_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.749-0400 m30999| 2015-07-09T14:14:56.749-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.749-0400 m31100| 2015-07-09T14:14:56.749-0400 I COMMAND [conn45] CMD: drop db52.reindex_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.749-0400 m31101| 2015-07-09T14:14:56.749-0400 I COMMAND [repl writer worker 1] CMD: drop db52.reindex_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.750-0400 m31102| 2015-07-09T14:14:56.749-0400 I COMMAND [repl writer worker 6] CMD: drop db52.reindex_6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.751-0400 m31101| 2015-07-09T14:14:56.751-0400 I COMMAND [repl writer worker 5] CMD: drop db52.reindex_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.752-0400 m31102| 2015-07-09T14:14:56.752-0400 I COMMAND [repl writer worker 13] CMD: drop db52.reindex_7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.752-0400 m30999| 2015-07-09T14:14:56.752-0400 I COMMAND [conn1] DROP: db52.reindex_9
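"drop going to do passthrough" is mongos noting that the collection has no sharding metadata, so the drop is forwarded as-is to the primary shard (m31100); the matching m31101/m31102 entries are its secondaries replaying the drop from the oplog. From the shell the whole fan-out is one call:

    // One drop issued through mongos; replication to the secondaries is
    // automatic and shows up as the "repl writer worker" entries above.
    db.getSiblingDB('db52').reindex_0.drop();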
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.753-0400 m30999| 2015-07-09T14:14:56.752-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.753-0400 m31100| 2015-07-09T14:14:56.753-0400 I COMMAND [conn45] CMD: drop db52.reindex_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.754-0400 m31101| 2015-07-09T14:14:56.754-0400 I COMMAND [repl writer worker 12] CMD: drop db52.reindex_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.754-0400 m31102| 2015-07-09T14:14:56.754-0400 I COMMAND [repl writer worker 4] CMD: drop db52.reindex_8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.755-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400 jstests/concurrency/fsm_workloads/reindex.js: Workload completed in 15940 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400 m30999| 2015-07-09T14:14:56.756-0400 I COMMAND [conn1] DROP: db52.coll52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.756-0400 m30999| 2015-07-09T14:14:56.756-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:56.756-0400-559eba20ca4787b9985d1e20", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465696756), what: "dropCollection.start", ns: "db52.coll52", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.757-0400 m31101| 2015-07-09T14:14:56.756-0400 I COMMAND [repl writer worker 3] CMD: drop db52.reindex_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.758-0400 m31102| 2015-07-09T14:14:56.758-0400 I COMMAND [repl writer worker 14] CMD: drop db52.reindex_9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.813-0400 m30999| 2015-07-09T14:14:56.812-0400 I SHARDING [conn1] distributed lock 'db52.coll52/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba20ca4787b9985d1e21
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.814-0400 m31100| 2015-07-09T14:14:56.813-0400 I COMMAND [conn15] CMD: drop db52.coll52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.817-0400 m31200| 2015-07-09T14:14:56.816-0400 I COMMAND [conn18] CMD: drop db52.coll52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.818-0400 m31102| 2015-07-09T14:14:56.818-0400 I COMMAND [repl writer worker 3] CMD: drop db52.coll52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.819-0400 m31101| 2015-07-09T14:14:56.819-0400 I COMMAND [repl writer worker 7] CMD: drop db52.coll52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.821-0400 m31201| 2015-07-09T14:14:56.821-0400 I COMMAND [repl writer worker 11] CMD: drop db52.coll52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.822-0400 m31202| 2015-07-09T14:14:56.822-0400 I COMMAND [repl writer worker 8] CMD: drop db52.coll52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.872-0400 m31100| 2015-07-09T14:14:56.872-0400 I SHARDING [conn15] remotely refreshing metadata for db52.coll52 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba10ca4787b9985d1e1e, current metadata version is 2|3||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.874-0400 m31100| 2015-07-09T14:14:56.873-0400 W SHARDING [conn15] no chunks found when reloading db52.coll52, previous version was 0|0||559eba10ca4787b9985d1e1e, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.874-0400 m31100| 2015-07-09T14:14:56.874-0400 I SHARDING [conn15] dropping metadata for db52.coll52 at shard version 2|3||559eba10ca4787b9985d1e1e, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.875-0400 m31200| 2015-07-09T14:14:56.875-0400 I SHARDING [conn18] remotely refreshing metadata for db52.coll52 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba10ca4787b9985d1e1e, current metadata version is 2|5||559eba10ca4787b9985d1e1e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.877-0400 m31200| 2015-07-09T14:14:56.876-0400 W SHARDING [conn18] no chunks found when reloading db52.coll52, previous version was 0|0||559eba10ca4787b9985d1e1e, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.877-0400 m31200| 2015-07-09T14:14:56.877-0400 I SHARDING [conn18] dropping metadata for db52.coll52 at shard version 2|5||559eba10ca4787b9985d1e1e, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.878-0400 m30999| 2015-07-09T14:14:56.878-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:56.878-0400-559eba20ca4787b9985d1e22", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465696878), what: "dropCollection", ns: "db52.coll52", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.933-0400 m30999| 2015-07-09T14:14:56.932-0400 I SHARDING [conn1] distributed lock 'db52.coll52/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.989-0400 m30999| 2015-07-09T14:14:56.989-0400 I COMMAND [conn1] DROP DATABASE: db52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.989-0400 m30999| 2015-07-09T14:14:56.989-0400 I SHARDING [conn1] DBConfig::dropDatabase: db52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:56.990-0400 m30999| 2015-07-09T14:14:56.989-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:56.989-0400-559eba20ca4787b9985d1e23", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465696989), what: "dropDatabase.start", ns: "db52", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.096-0400 m30999| 2015-07-09T14:14:57.095-0400 I SHARDING [conn1] DBConfig::dropDatabase: db52 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.096-0400 m31100| 2015-07-09T14:14:57.096-0400 I COMMAND [conn160] dropDatabase db52 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.097-0400 m31100| 2015-07-09T14:14:57.096-0400 I COMMAND [conn160] dropDatabase db52 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.098-0400 m30999| 2015-07-09T14:14:57.097-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.097-0400-559eba21ca4787b9985d1e24", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465697097), what: "dropDatabase", ns: "db52", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.098-0400 m31102| 2015-07-09T14:14:57.097-0400 I COMMAND [repl writer worker 5] dropDatabase db52 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.098-0400 m31101| 2015-07-09T14:14:57.097-0400 I COMMAND [repl writer worker 13] dropDatabase db52 starting
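Workload teardown ends with the whole db52 database being dropped through mongos, which writes dropDatabase.start/dropDatabase events to the config metadata log around the actual drop on the shard. The equivalent shell call:

    // Drops db52 on its primary shard; the secondaries replay it, producing
    // the "dropDatabase db52 starting/finished" pairs in the log.
    db.getSiblingDB('db52').dropDatabase();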
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.098-0400 m31101| 2015-07-09T14:14:57.097-0400 I COMMAND [repl writer worker 13] dropDatabase db52 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.098-0400 m31102| 2015-07-09T14:14:57.097-0400 I COMMAND [repl writer worker 5] dropDatabase db52 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.190-0400 m31100| 2015-07-09T14:14:57.189-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.194-0400 m31102| 2015-07-09T14:14:57.193-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.194-0400 m31101| 2015-07-09T14:14:57.194-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.224-0400 m31200| 2015-07-09T14:14:57.224-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.227-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.227-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.227-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.228-0400 jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.228-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.228-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.228-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.228-0400 m31202| 2015-07-09T14:14:57.228-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.228-0400 m31201| 2015-07-09T14:14:57.228-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.234-0400 m30999| 2015-07-09T14:14:57.234-0400 I SHARDING [conn1] distributed lock 'db53/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba21ca4787b9985d1e25
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.238-0400 m30999| 2015-07-09T14:14:57.238-0400 I SHARDING [conn1] Placing [db53] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.238-0400 m30999| 2015-07-09T14:14:57.238-0400 I SHARDING [conn1] Enabling sharding for database [db53] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.293-0400 m30999| 2015-07-09T14:14:57.293-0400 I SHARDING [conn1] distributed lock 'db53/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.316-0400 m31100| 2015-07-09T14:14:57.315-0400 I INDEX [conn29] build index on: db53.coll53 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db53.coll53" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.316-0400 m31100| 2015-07-09T14:14:57.316-0400 I INDEX [conn29] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.324-0400 m31100| 2015-07-09T14:14:57.322-0400 I INDEX [conn29] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.324-0400 m30999| 2015-07-09T14:14:57.323-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db53.coll53", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.327-0400 m30999| 2015-07-09T14:14:57.327-0400 I SHARDING [conn1] distributed lock 'db53.coll53/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba21ca4787b9985d1e26
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.329-0400 m30999| 2015-07-09T14:14:57.328-0400 I SHARDING [conn1] enable sharding on: db53.coll53 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.330-0400 m30999| 2015-07-09T14:14:57.328-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.328-0400-559eba21ca4787b9985d1e27", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465697328), what: "shardCollection.start", ns: "db53.coll53", details: { shardKey: { _id: "hashed" }, collection: "db53.coll53", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.340-0400 m31101| 2015-07-09T14:14:57.340-0400 I INDEX [repl writer worker 8] build index on: db53.coll53 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db53.coll53" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.341-0400 m31101| 2015-07-09T14:14:57.340-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.344-0400 m31102| 2015-07-09T14:14:57.343-0400 I INDEX [repl writer worker 12] build index on: db53.coll53 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db53.coll53" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.344-0400 m31102| 2015-07-09T14:14:57.343-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.348-0400 m31101| 2015-07-09T14:14:57.347-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.350-0400 m31102| 2015-07-09T14:14:57.349-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
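Setup for the next workload: db53.coll53 is sharded on a hashed _id key. The hashed index is built on the primary shard first (and replicated to m31101/m31102), then the sharding metadata is written; the shardCollection.start event records numChunks: 2. An assumed equivalent of the harness's setup calls:

    sh.enableSharding('db53');                            // "Enabling sharding for database [db53]"
    sh.shardCollection('db53.coll53', { _id: 'hashed' }); // "CMD: shardcollection: ..."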
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.383-0400 m30999| 2015-07-09T14:14:57.382-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db53.coll53 using new epoch 559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.491-0400 m30999| 2015-07-09T14:14:57.490-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db53.coll53: 1ms sequenceNumber: 235 version: 1|1||559eba21ca4787b9985d1e28 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.547-0400 m30999| 2015-07-09T14:14:57.547-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db53.coll53: 1ms sequenceNumber: 236 version: 1|1||559eba21ca4787b9985d1e28 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.549-0400 m31100| 2015-07-09T14:14:57.549-0400 I SHARDING [conn45] remotely refreshing metadata for db53.coll53 with requested shard version 1|1||559eba21ca4787b9985d1e28, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.551-0400 m31100| 2015-07-09T14:14:57.551-0400 I SHARDING [conn45] collection db53.coll53 was previously unsharded, new metadata loaded with shard version 1|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.551-0400 m31100| 2015-07-09T14:14:57.551-0400 I SHARDING [conn45] collection version was loaded at version 1|1||559eba21ca4787b9985d1e28, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.552-0400 m30999| 2015-07-09T14:14:57.551-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.551-0400-559eba21ca4787b9985d1e29", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465697551), what: "shardCollection", ns: "db53.coll53", details: { version: "1|1||559eba21ca4787b9985d1e28" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.606-0400 m30999| 2015-07-09T14:14:57.606-0400 I SHARDING [conn1] distributed lock 'db53.coll53/bs-osx108-8:30999:1436464534:16807' unlocked. 
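The run above shows the harness preparing db53.coll53 for the indexed_insert_multikey_noindex.js workload: mongos places db53 on test-rs0, enables sharding, builds the hashed _id index on the primary shard's replica set, and registers the collection with two initial chunks (numChunks: 2 in the shardCollection.start event), split at hash value 0. A minimal shell sketch of that setup, under the assumption that it is driven through the mongos at port 30999 seen in the log (the harness's own helper may differ):

// Hedged sketch: the sharding setup logged above, issued through mongos.
// Assumes the mongos at localhost:30999 from this test run.
var admin = new Mongo("localhost:30999").getDB("admin");

// Enable sharding for the database, then shard the collection on a hashed
// _id key; the log records the same two steps ("Enabling sharding for
// database [db53]" and the shardcollection command).
assert.commandWorked(admin.runCommand({ enableSharding: "db53" }));
assert.commandWorked(admin.runCommand({
    shardCollection: "db53.coll53",
    key: { _id: "hashed" }
}));

Because the initial chunks are split at hash value 0, the moveChunk and splitChunk steps that follow all operate on the { _id: 0 } -> { _id: MaxKey } and { _id: MinKey } -> { _id: 0 } bounds.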
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.607-0400 m30999| 2015-07-09T14:14:57.607-0400 I SHARDING [conn1] moving chunk ns: db53.coll53 moving ( ns: db53.coll53, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.608-0400 m31100| 2015-07-09T14:14:57.608-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.609-0400 m31100| 2015-07-09T14:14:57.608-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba21ca4787b9985d1e28') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.616-0400 m31100| 2015-07-09T14:14:57.616-0400 I SHARDING [conn15] distributed lock 'db53.coll53/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba21792e00bb67274a20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.617-0400 m31100| 2015-07-09T14:14:57.616-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.616-0400-559eba21792e00bb67274a21", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465697616), what: "moveChunk.start", ns: "db53.coll53", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.670-0400 m31100| 2015-07-09T14:14:57.669-0400 I SHARDING [conn15] remotely refreshing metadata for db53.coll53 based on current shard version 1|1||559eba21ca4787b9985d1e28, current metadata version is 1|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.672-0400 m31100| 2015-07-09T14:14:57.671-0400 I SHARDING [conn15] metadata of collection db53.coll53 already up to date (shard version : 1|1||559eba21ca4787b9985d1e28, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.673-0400 m31100| 2015-07-09T14:14:57.671-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.674-0400 m31100| 2015-07-09T14:14:57.673-0400 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.674-0400 m31200| 2015-07-09T14:14:57.674-0400 I SHARDING [conn16] remotely refreshing metadata for db53.coll53, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.676-0400 m31200| 2015-07-09T14:14:57.675-0400 I SHARDING [conn16] collection db53.coll53 was previously unsharded, new metadata loaded with shard version 0|0||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.676-0400 m31200| 2015-07-09T14:14:57.676-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba21ca4787b9985d1e28, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.677-0400 m31200| 2015-07-09T14:14:57.676-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db53.coll53 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.679-0400 m31100| 2015-07-09T14:14:57.679-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.683-0400 m31100| 2015-07-09T14:14:57.682-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.688-0400 m31100| 2015-07-09T14:14:57.688-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.693-0400 m31200| 2015-07-09T14:14:57.693-0400 I INDEX [migrateThread] build index on: db53.coll53 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.693-0400 m31200| 2015-07-09T14:14:57.693-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.698-0400 m31100| 2015-07-09T14:14:57.698-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.699-0400 m31200| 2015-07-09T14:14:57.698-0400 I INDEX [migrateThread] build index on: db53.coll53 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.700-0400 m31200| 2015-07-09T14:14:57.699-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.714-0400 m31200| 2015-07-09T14:14:57.714-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.716-0400 m31200| 2015-07-09T14:14:57.715-0400 I SHARDING [migrateThread] Deleter starting delete for: db53.coll53 from { _id: 0 } -> { _id: MaxKey }, with opId: 88226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.716-0400 m31200| 2015-07-09T14:14:57.716-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db53.coll53 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.717-0400 m31100| 2015-07-09T14:14:57.716-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.726-0400 m31201| 2015-07-09T14:14:57.725-0400 I INDEX [repl writer worker 10] build index on: db53.coll53 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.727-0400 m31201| 2015-07-09T14:14:57.726-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.729-0400 m31202| 2015-07-09T14:14:57.728-0400 I INDEX [repl writer worker 15] build index on: db53.coll53 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.729-0400 m31202| 2015-07-09T14:14:57.728-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.735-0400 m31202| 2015-07-09T14:14:57.735-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.739-0400 m31200| 2015-07-09T14:14:57.738-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.739-0400 m31200| 2015-07-09T14:14:57.738-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db53.coll53' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.739-0400 m31201| 2015-07-09T14:14:57.739-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.750-0400 m31100| 2015-07-09T14:14:57.750-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.751-0400 m31100| 2015-07-09T14:14:57.750-0400 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.751-0400 m31100| 2015-07-09T14:14:57.751-0400 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.751-0400 m31100| 2015-07-09T14:14:57.751-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.762-0400 m31200| 2015-07-09T14:14:57.761-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db53.coll53' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.763-0400 m31200| 2015-07-09T14:14:57.762-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.762-0400-559eba21d5a107a5b9c0db55", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465697762), what: "moveChunk.to", ns: "db53.coll53", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 38, step 2 of 5: 21, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.816-0400 m31100| 2015-07-09T14:14:57.816-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.817-0400 m31100| 2015-07-09T14:14:57.816-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eba21ca4787b9985d1e28 through { _id: MinKey } -> { _id: 0 } for collection 'db53.coll53' [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.819-0400 m31100| 2015-07-09T14:14:57.818-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.818-0400-559eba21792e00bb67274a22", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465697818), what: "moveChunk.commit", ns: "db53.coll53", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.872-0400 m31100| 2015-07-09T14:14:57.872-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.872-0400 m31100| 2015-07-09T14:14:57.872-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.872-0400 m31100| 2015-07-09T14:14:57.872-0400 I SHARDING [conn15] Deleter starting delete for: db53.coll53 from { _id: 0 } -> { _id: MaxKey }, with opId: 134369 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:57.873-0400 m31100| 2015-07-09T14:14:57.872-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db53.coll53 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.873-0400 m31100| 2015-07-09T14:14:57.872-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.874-0400 m31100| 2015-07-09T14:14:57.874-0400 I SHARDING [conn15] distributed lock 'db53.coll53/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.875-0400 m31100| 2015-07-09T14:14:57.874-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.874-0400-559eba21792e00bb67274a23", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465697874), what: "moveChunk.from", ns: "db53.coll53", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 62, step 3 of 6: 5, step 4 of 6: 73, step 5 of 6: 121, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.928-0400 m31100| 2015-07-09T14:14:57.927-0400 I COMMAND [conn15] command db53.coll53 command: moveChunk { moveChunk: "db53.coll53", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba21ca4787b9985d1e28') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 319ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.930-0400 m30999| 2015-07-09T14:14:57.929-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db53.coll53: 0ms sequenceNumber: 237 version: 2|1||559eba21ca4787b9985d1e28 based on: 1|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.932-0400 m31100| 2015-07-09T14:14:57.931-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db53.coll53", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba21ca4787b9985d1e28') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.935-0400 m31100| 2015-07-09T14:14:57.935-0400 I SHARDING [conn15] distributed lock 'db53.coll53/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba21792e00bb67274a24 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.935-0400 m31100| 2015-07-09T14:14:57.935-0400 I SHARDING [conn15] remotely refreshing metadata for db53.coll53 based on current shard version 2|0||559eba21ca4787b9985d1e28, current metadata version is 2|0||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.937-0400 m31100| 2015-07-09T14:14:57.937-0400 I SHARDING [conn15] updating metadata for db53.coll53 from shard version 2|0||559eba21ca4787b9985d1e28 to shard version 2|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.937-0400 m31100| 2015-07-09T14:14:57.937-0400 I 
SHARDING [conn15] collection version was loaded at version 2|1||559eba21ca4787b9985d1e28, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.938-0400 m31100| 2015-07-09T14:14:57.937-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.939-0400 m31100| 2015-07-09T14:14:57.938-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:57.938-0400-559eba21792e00bb67274a25", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465697938), what: "split", ns: "db53.coll53", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba21ca4787b9985d1e28') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba21ca4787b9985d1e28') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.994-0400 m31100| 2015-07-09T14:14:57.993-0400 I SHARDING [conn15] distributed lock 'db53.coll53/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.995-0400 m30999| 2015-07-09T14:14:57.995-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db53.coll53: 0ms sequenceNumber: 238 version: 2|3||559eba21ca4787b9985d1e28 based on: 2|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:57.996-0400 m31200| 2015-07-09T14:14:57.995-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db53.coll53", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba21ca4787b9985d1e28') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.000-0400 m31200| 2015-07-09T14:14:58.000-0400 I SHARDING [conn18] distributed lock 'db53.coll53/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba21d5a107a5b9c0db56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.000-0400 m31200| 2015-07-09T14:14:58.000-0400 I SHARDING [conn18] remotely refreshing metadata for db53.coll53 based on current shard version 0|0||559eba21ca4787b9985d1e28, current metadata version is 1|1||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.002-0400 m31200| 2015-07-09T14:14:58.001-0400 I SHARDING [conn18] updating metadata for db53.coll53 from shard version 0|0||559eba21ca4787b9985d1e28 to shard version 2|0||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.002-0400 m31200| 2015-07-09T14:14:58.001-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba21ca4787b9985d1e28, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.002-0400 m31200| 2015-07-09T14:14:58.001-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.004-0400 m31200| 2015-07-09T14:14:58.003-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:58.003-0400-559eba22d5a107a5b9c0db57", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465698003), what: "split", ns: "db53.coll53", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eba21ca4787b9985d1e28') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba21ca4787b9985d1e28') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.058-0400 m31200| 2015-07-09T14:14:58.057-0400 I SHARDING [conn18] distributed lock 'db53.coll53/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.060-0400 m30999| 2015-07-09T14:14:58.059-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db53.coll53: 0ms sequenceNumber: 239 version: 2|5||559eba21ca4787b9985d1e28 based on: 2|3||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.068-0400 m31200| 2015-07-09T14:14:58.068-0400 I INDEX [conn82] build index on: db53.coll53 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.069-0400 m31200| 2015-07-09T14:14:58.068-0400 I INDEX [conn82] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.069-0400 m31100| 2015-07-09T14:14:58.068-0400 I INDEX [conn45] build index on: db53.coll53 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.069-0400 m31100| 2015-07-09T14:14:58.069-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.071-0400 m31200| 2015-07-09T14:14:58.071-0400 I INDEX [conn82] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.084-0400 m31100| 2015-07-09T14:14:58.083-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.084-0400 m31201| 2015-07-09T14:14:58.083-0400 I INDEX [repl writer worker 15] build index on: db53.coll53 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.084-0400 m31201| 2015-07-09T14:14:58.083-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.086-0400 m31202| 2015-07-09T14:14:58.085-0400 I INDEX [repl writer worker 6] build index on: db53.coll53 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.087-0400 m31100| 2015-07-09T14:14:58.085-0400 I COMMAND [conn15] CMD: dropIndexes db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.087-0400 m31202| 2015-07-09T14:14:58.085-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.087-0400 m31200| 2015-07-09T14:14:58.086-0400 I COMMAND [conn18] CMD: dropIndexes db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.089-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.139-0400 m31201| 2015-07-09T14:14:58.138-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.151-0400 m31202| 2015-07-09T14:14:58.148-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.152-0400 m31202| 2015-07-09T14:14:58.151-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.155-0400 m31102| 2015-07-09T14:14:58.153-0400 I INDEX [repl writer worker 1] build index on: db53.coll53 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.155-0400 m31102| 2015-07-09T14:14:58.153-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.155-0400 m31101| 2015-07-09T14:14:58.154-0400 I INDEX [repl writer worker 14] build index on: db53.coll53 properties: { v: 1, key: { indexed_insert_multikey: 1.0 }, name: "indexed_insert_multikey_1", ns: "db53.coll53" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.156-0400 m31101| 2015-07-09T14:14:58.154-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.156-0400 m31201| 2015-07-09T14:14:58.154-0400 I COMMAND [repl writer worker 0] CMD: dropIndexes db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.227-0400 m30999| 2015-07-09T14:14:58.226-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63763 #335 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.233-0400 m31101| 2015-07-09T14:14:58.232-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.246-0400 m31102| 2015-07-09T14:14:58.243-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.250-0400 m30998| 2015-07-09T14:14:58.247-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63764 #334 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.258-0400 m31101| 2015-07-09T14:14:58.254-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.259-0400 m30998| 2015-07-09T14:14:58.258-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63765 #335 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.266-0400 m31102| 2015-07-09T14:14:58.266-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.294-0400 m30999| 2015-07-09T14:14:58.293-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63766 #336 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.294-0400 m30998| 2015-07-09T14:14:58.293-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63767 #336 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.304-0400 m30998| 2015-07-09T14:14:58.304-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63768 #337 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.323-0400 m30998| 2015-07-09T14:14:58.322-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63769 #338 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.344-0400 m30999| 2015-07-09T14:14:58.342-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63770 #337 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.370-0400 m30999| 2015-07-09T14:14:58.370-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63771 #338 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.371-0400 m30998| 2015-07-09T14:14:58.370-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63772 #339 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.384-0400 m30998| 2015-07-09T14:14:58.384-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63773 #340 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.385-0400 m30999| 2015-07-09T14:14:58.384-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63774 #339 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.389-0400 m30998| 2015-07-09T14:14:58.389-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63775 #341 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.395-0400 m30999| 2015-07-09T14:14:58.394-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63776 #340 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.398-0400 m30999| 2015-07-09T14:14:58.397-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63777 #341 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.399-0400 m30998| 2015-07-09T14:14:58.398-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63778 #342 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.399-0400 m30998| 2015-07-09T14:14:58.399-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63781 #343 (11 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:58.400-0400 m30999| 2015-07-09T14:14:58.399-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63779 #342 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.400-0400 m30999| 2015-07-09T14:14:58.400-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63780 #343 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.408-0400 m30999| 2015-07-09T14:14:58.407-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63782 #344 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.425-0400 setting random seed: 9496802054345 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.425-0400 setting random seed: 1389649710617 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.427-0400 setting random seed: 6604473707266 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.427-0400 setting random seed: 5842697229236 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.427-0400 setting random seed: 4230283116921 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.430-0400 setting random seed: 9952057488262 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.435-0400 setting random seed: 5428687571547 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.436-0400 m30998| 2015-07-09T14:14:58.434-0400 I SHARDING [conn336] ChunkManager: time to load chunks for db53.coll53: 0ms sequenceNumber: 64 version: 2|5||559eba21ca4787b9985d1e28 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.436-0400 setting random seed: 1878740135580 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.436-0400 setting random seed: 5985473399050 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.441-0400 setting random seed: 1680222335271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.444-0400 setting random seed: 6254175417125 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.448-0400 setting random seed: 9152531945146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.449-0400 setting random seed: 7349418704397 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.458-0400 setting random seed: 7851464683189 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.462-0400 setting random seed: 6947649936191 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.496-0400 setting random seed: 2752201710827 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.496-0400 setting random seed: 3926284569315 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.522-0400 setting random seed: 8212377498857 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.524-0400 setting random seed: 9064787211827 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.524-0400 setting random seed: 8368379208259 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.932-0400 m30999| 2015-07-09T14:14:58.931-0400 I NETWORK [conn335] end connection 127.0.0.1:63763 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.944-0400 m30998| 2015-07-09T14:14:58.944-0400 I NETWORK [conn334] end connection 127.0.0.1:63764 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.957-0400 m30998| 2015-07-09T14:14:58.956-0400 I NETWORK [conn336] end connection 127.0.0.1:63767 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:58.999-0400 m30998| 2015-07-09T14:14:58.998-0400 I NETWORK [conn335] end connection 127.0.0.1:63765 (8 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.024-0400 m30998| 2015-07-09T14:14:59.024-0400 I NETWORK [conn337] end connection 127.0.0.1:63768 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.077-0400 m30998| 2015-07-09T14:14:59.077-0400 I NETWORK [conn338] end connection 127.0.0.1:63769 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.102-0400 m30999| 2015-07-09T14:14:59.101-0400 I NETWORK [conn336] end connection 127.0.0.1:63766 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.104-0400 m30998| 2015-07-09T14:14:59.104-0400 I NETWORK [conn343] end connection 127.0.0.1:63781 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.146-0400 m30998| 2015-07-09T14:14:59.146-0400 I NETWORK [conn339] end connection 127.0.0.1:63772 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.199-0400 m30999| 2015-07-09T14:14:59.199-0400 I NETWORK [conn339] end connection 127.0.0.1:63774 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.208-0400 m30998| 2015-07-09T14:14:59.207-0400 I NETWORK [conn341] end connection 127.0.0.1:63775 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.221-0400 m30999| 2015-07-09T14:14:59.221-0400 I NETWORK [conn337] end connection 127.0.0.1:63770 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.224-0400 m30998| 2015-07-09T14:14:59.224-0400 I NETWORK [conn340] end connection 127.0.0.1:63773 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.250-0400 m30999| 2015-07-09T14:14:59.249-0400 I NETWORK [conn338] end connection 127.0.0.1:63771 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.278-0400 m30999| 2015-07-09T14:14:59.278-0400 I NETWORK [conn341] end connection 127.0.0.1:63777 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.280-0400 m30999| 2015-07-09T14:14:59.280-0400 I NETWORK [conn344] end connection 127.0.0.1:63782 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.283-0400 m30999| 2015-07-09T14:14:59.281-0400 I NETWORK [conn343] end connection 127.0.0.1:63780 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.291-0400 m30999| 2015-07-09T14:14:59.289-0400 I NETWORK [conn340] end connection 127.0.0.1:63776 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.302-0400 m30999| 2015-07-09T14:14:59.302-0400 I NETWORK [conn342] end connection 127.0.0.1:63779 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.366-0400 m30998| 2015-07-09T14:14:59.365-0400 I NETWORK [conn342] end connection 127.0.0.1:63778 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js: Workload completed in 1301 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.390-0400 m30999| 
2015-07-09T14:14:59.390-0400 I COMMAND [conn1] DROP: db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.391-0400 m30999| 2015-07-09T14:14:59.390-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:59.390-0400-559eba23ca4787b9985d1e2a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465699390), what: "dropCollection.start", ns: "db53.coll53", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.448-0400 m30999| 2015-07-09T14:14:59.447-0400 I SHARDING [conn1] distributed lock 'db53.coll53/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba23ca4787b9985d1e2b [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.449-0400 m31100| 2015-07-09T14:14:59.449-0400 I COMMAND [conn15] CMD: drop db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.453-0400 m31200| 2015-07-09T14:14:59.452-0400 I COMMAND [conn18] CMD: drop db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.454-0400 m31102| 2015-07-09T14:14:59.453-0400 I COMMAND [repl writer worker 11] CMD: drop db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.454-0400 m31101| 2015-07-09T14:14:59.454-0400 I COMMAND [repl writer worker 2] CMD: drop db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.457-0400 m31201| 2015-07-09T14:14:59.456-0400 I COMMAND [repl writer worker 3] CMD: drop db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.457-0400 m31202| 2015-07-09T14:14:59.457-0400 I COMMAND [repl writer worker 6] CMD: drop db53.coll53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.509-0400 m31100| 2015-07-09T14:14:59.508-0400 I SHARDING [conn15] remotely refreshing metadata for db53.coll53 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba21ca4787b9985d1e28, current metadata version is 2|3||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.510-0400 m31100| 2015-07-09T14:14:59.510-0400 W SHARDING [conn15] no chunks found when reloading db53.coll53, previous version was 0|0||559eba21ca4787b9985d1e28, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.511-0400 m31100| 2015-07-09T14:14:59.510-0400 I SHARDING [conn15] dropping metadata for db53.coll53 at shard version 2|3||559eba21ca4787b9985d1e28, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.512-0400 m31200| 2015-07-09T14:14:59.512-0400 I SHARDING [conn18] remotely refreshing metadata for db53.coll53 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba21ca4787b9985d1e28, current metadata version is 2|5||559eba21ca4787b9985d1e28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.514-0400 m31200| 2015-07-09T14:14:59.513-0400 W SHARDING [conn18] no chunks found when reloading db53.coll53, previous version was 0|0||559eba21ca4787b9985d1e28, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.514-0400 m31200| 2015-07-09T14:14:59.513-0400 I SHARDING [conn18] dropping metadata for db53.coll53 at shard version 2|5||559eba21ca4787b9985d1e28, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.515-0400 m30999| 2015-07-09T14:14:59.514-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:59.514-0400-559eba23ca4787b9985d1e2c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465699514), what: "dropCollection", ns: "db53.coll53", details: {} } 
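Workload teardown follows the same pattern between every test in this suite: the drop above fans out from mongos to both shard primaries and replicates to their secondaries, each shard discards its chunk metadata ("no chunks found when reloading ... this is a drop"), and then the database itself is dropped just below. A minimal sketch of the equivalent shell teardown, assuming it is driven through the same mongos (the harness uses its own teardown helper):

// Hedged sketch: the inter-workload teardown visible in the surrounding log.
// Dropping through mongos takes the distributed lock, drops the collection
// on every shard, clears its chunk metadata on the config servers, and
// finally drops the now-empty database.
var db53 = new Mongo("localhost:30999").getDB("db53");
db53.coll53.drop();    // "DROP: db53.coll53" / "dropCollection" events
db53.dropDatabase();   // "DROP DATABASE: db53" / "dropDatabase" events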
[js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.570-0400 m30999| 2015-07-09T14:14:59.569-0400 I SHARDING [conn1] distributed lock 'db53.coll53/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.625-0400 m30999| 2015-07-09T14:14:59.625-0400 I COMMAND [conn1] DROP DATABASE: db53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.625-0400 m30999| 2015-07-09T14:14:59.625-0400 I SHARDING [conn1] DBConfig::dropDatabase: db53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.626-0400 m30999| 2015-07-09T14:14:59.625-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:59.625-0400-559eba23ca4787b9985d1e2d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465699625), what: "dropDatabase.start", ns: "db53", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.733-0400 m30999| 2015-07-09T14:14:59.732-0400 I SHARDING [conn1] DBConfig::dropDatabase: db53 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.733-0400 m31100| 2015-07-09T14:14:59.733-0400 I COMMAND [conn160] dropDatabase db53 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.733-0400 m31100| 2015-07-09T14:14:59.733-0400 I COMMAND [conn160] dropDatabase db53 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.734-0400 m30999| 2015-07-09T14:14:59.733-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:59.733-0400-559eba23ca4787b9985d1e2e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465699733), what: "dropDatabase", ns: "db53", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.735-0400 m31101| 2015-07-09T14:14:59.734-0400 I COMMAND [repl writer worker 13] dropDatabase db53 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.735-0400 m31101| 2015-07-09T14:14:59.734-0400 I COMMAND [repl writer worker 13] dropDatabase db53 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.735-0400 m31102| 2015-07-09T14:14:59.735-0400 I COMMAND [repl writer worker 5] dropDatabase db53 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.735-0400 m31102| 2015-07-09T14:14:59.735-0400 I COMMAND [repl writer worker 5] dropDatabase db53 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.821-0400 m31100| 2015-07-09T14:14:59.821-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.825-0400 m31101| 2015-07-09T14:14:59.825-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.826-0400 m31102| 2015-07-09T14:14:59.825-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.859-0400 m31200| 2015-07-09T14:14:59.858-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.862-0400 m31202| 2015-07-09T14:14:59.862-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.863-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.863-0400 jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js [js_test:fsm_all_sharded_replication] 
2015-07-09T14:14:59.863-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.863-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.864-0400 m31201| 2015-07-09T14:14:59.862-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.871-0400 m30999| 2015-07-09T14:14:59.870-0400 I SHARDING [conn1] distributed lock 'db54/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba23ca4787b9985d1e2f [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.875-0400 m30999| 2015-07-09T14:14:59.875-0400 I SHARDING [conn1] Placing [db54] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.876-0400 m30999| 2015-07-09T14:14:59.875-0400 I SHARDING [conn1] Enabling sharding for database [db54] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.930-0400 m30999| 2015-07-09T14:14:59.929-0400 I SHARDING [conn1] distributed lock 'db54/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.954-0400 m31100| 2015-07-09T14:14:59.954-0400 I INDEX [conn23] build index on: db54.coll54 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.954-0400 m31100| 2015-07-09T14:14:59.954-0400 I INDEX [conn23] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.966-0400 m31100| 2015-07-09T14:14:59.966-0400 I INDEX [conn23] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.968-0400 m30999| 2015-07-09T14:14:59.967-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db54.coll54", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.971-0400 m30999| 2015-07-09T14:14:59.970-0400 I SHARDING [conn1] distributed lock 'db54.coll54/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba23ca4787b9985d1e30 [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.971-0400 m30999| 2015-07-09T14:14:59.971-0400 I SHARDING [conn1] enable sharding on: db54.coll54 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.972-0400 m30999| 2015-07-09T14:14:59.971-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:14:59.971-0400-559eba23ca4787b9985d1e31", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465699971), what: "shardCollection.start", ns: "db54.coll54", details: { shardKey: { _id: "hashed" }, collection: "db54.coll54", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.976-0400 m31101| 2015-07-09T14:14:59.976-0400 I INDEX [repl writer worker 12] build index on: db54.coll54 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.976-0400 m31101| 2015-07-09T14:14:59.976-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.979-0400 m31102| 2015-07-09T14:14:59.978-0400 I INDEX [repl writer worker 6] build index on: db54.coll54 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.979-0400 m31102| 
2015-07-09T14:14:59.978-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.984-0400 m31102| 2015-07-09T14:14:59.984-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:14:59.985-0400 m31101| 2015-07-09T14:14:59.984-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.025-0400 m30999| 2015-07-09T14:15:00.025-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db54.coll54 using new epoch 559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.134-0400 m30999| 2015-07-09T14:15:00.133-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db54.coll54: 0ms sequenceNumber: 240 version: 1|1||559eba24ca4787b9985d1e32 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.190-0400 m30999| 2015-07-09T14:15:00.189-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db54.coll54: 0ms sequenceNumber: 241 version: 1|1||559eba24ca4787b9985d1e32 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.192-0400 m31100| 2015-07-09T14:15:00.191-0400 I SHARDING [conn56] remotely refreshing metadata for db54.coll54 with requested shard version 1|1||559eba24ca4787b9985d1e32, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.194-0400 m31100| 2015-07-09T14:15:00.193-0400 I SHARDING [conn56] collection db54.coll54 was previously unsharded, new metadata loaded with shard version 1|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.194-0400 m31100| 2015-07-09T14:15:00.193-0400 I SHARDING [conn56] collection version was loaded at version 1|1||559eba24ca4787b9985d1e32, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.194-0400 m30999| 2015-07-09T14:15:00.193-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.193-0400-559eba24ca4787b9985d1e33", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465700193), what: "shardCollection", ns: "db54.coll54", details: { version: "1|1||559eba24ca4787b9985d1e32" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.249-0400 m30999| 2015-07-09T14:15:00.248-0400 I SHARDING [conn1] distributed lock 'db54.coll54/bs-osx108-8:30999:1436464534:16807' unlocked. 
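The moveChunk/splitChunk sequence that follows (identical to the one for db53.coll53 earlier in this section) is the harness distributing the two initial chunks across the shards: the { _id: 0 } -> { _id: MaxKey } chunk is migrated from test-rs0 to test-rs1 with waitForDelete, then each shard's chunk is split once more at +/-4611686018427387902, roughly the midpoints of each half of the signed 64-bit hash range. A hedged sketch of the equivalent explicit command against mongos; bounds is used rather than find because find-style targeting hashes its argument under a hashed shard key:

// Hedged sketch: the chunk migration logged below, issued through mongos.
// Moves the upper half of the hashed _id key space to the second shard so
// each shard owns one of the two initial chunks.
var admin = new Mongo("localhost:30999").getDB("admin");
assert.commandWorked(admin.runCommand({
    moveChunk: "db54.coll54",
    bounds: [ { _id: 0 }, { _id: MaxKey } ],  // exact chunk bounds in hashed space
    to: "test-rs1",
    _waitForDelete: true  // matches "moveChunk waiting for full cleanup after move"
}));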
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.250-0400 m30999| 2015-07-09T14:15:00.249-0400 I SHARDING [conn1] moving chunk ns: db54.coll54 moving ( ns: db54.coll54, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.250-0400 m31100| 2015-07-09T14:15:00.250-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.251-0400 m31100| 2015-07-09T14:15:00.251-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba24ca4787b9985d1e32') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.255-0400 m31100| 2015-07-09T14:15:00.254-0400 I SHARDING [conn15] distributed lock 'db54.coll54/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba24792e00bb67274a27 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.255-0400 m31100| 2015-07-09T14:15:00.255-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.255-0400-559eba24792e00bb67274a28", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465700255), what: "moveChunk.start", ns: "db54.coll54", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.308-0400 m31100| 2015-07-09T14:15:00.308-0400 I SHARDING [conn15] remotely refreshing metadata for db54.coll54 based on current shard version 1|1||559eba24ca4787b9985d1e32, current metadata version is 1|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.310-0400 m31100| 2015-07-09T14:15:00.309-0400 I SHARDING [conn15] metadata of collection db54.coll54 already up to date (shard version : 1|1||559eba24ca4787b9985d1e32, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.310-0400 m31100| 2015-07-09T14:15:00.309-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.310-0400 m31100| 2015-07-09T14:15:00.310-0400 I SHARDING [conn15] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.311-0400 m31200| 2015-07-09T14:15:00.311-0400 I SHARDING [conn16] remotely refreshing metadata for db54.coll54, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.313-0400 m31200| 2015-07-09T14:15:00.313-0400 I SHARDING [conn16] collection db54.coll54 was previously unsharded, new metadata loaded with shard version 0|0||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.313-0400 m31200| 2015-07-09T14:15:00.313-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba24ca4787b9985d1e32, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.314-0400 m31200| 2015-07-09T14:15:00.313-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db54.coll54 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.316-0400 m31100| 2015-07-09T14:15:00.315-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.319-0400 m31100| 2015-07-09T14:15:00.319-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.325-0400 m31100| 2015-07-09T14:15:00.324-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.331-0400 m31200| 2015-07-09T14:15:00.330-0400 I INDEX [migrateThread] build index on: db54.coll54 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.331-0400 m31200| 2015-07-09T14:15:00.331-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.335-0400 m31100| 2015-07-09T14:15:00.334-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.337-0400 m31200| 2015-07-09T14:15:00.337-0400 I INDEX [migrateThread] build index on: db54.coll54 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.338-0400 m31200| 2015-07-09T14:15:00.337-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.352-0400 m31100| 2015-07-09T14:15:00.351-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.354-0400 m31200| 2015-07-09T14:15:00.353-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.354-0400 m31200| 2015-07-09T14:15:00.354-0400 I SHARDING [migrateThread] Deleter starting delete for: db54.coll54 from { _id: 0 } -> { _id: MaxKey }, with opId: 89852 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.354-0400 m31200| 2015-07-09T14:15:00.354-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db54.coll54 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.366-0400 m31202| 2015-07-09T14:15:00.365-0400 I INDEX [repl writer worker 15] build index on: db54.coll54 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.366-0400 m31202| 2015-07-09T14:15:00.365-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.367-0400 m31201| 2015-07-09T14:15:00.366-0400 I INDEX [repl writer worker 11] build index on: db54.coll54 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.368-0400 m31201| 2015-07-09T14:15:00.366-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.373-0400 m31202| 2015-07-09T14:15:00.372-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.373-0400 m31201| 2015-07-09T14:15:00.373-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.375-0400 m31200| 2015-07-09T14:15:00.375-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.375-0400 m31200| 2015-07-09T14:15:00.375-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db54.coll54' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.385-0400 m31100| 2015-07-09T14:15:00.385-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.386-0400 m31100| 2015-07-09T14:15:00.385-0400 I SHARDING [conn15] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.386-0400 m31100| 2015-07-09T14:15:00.386-0400 I SHARDING [conn15] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.386-0400 m31100| 2015-07-09T14:15:00.386-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.387-0400 m31200| 2015-07-09T14:15:00.387-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db54.coll54' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.388-0400 m31200| 2015-07-09T14:15:00.387-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.387-0400-559eba24d5a107a5b9c0db58", server: "bs-osx108-8", clientAddr: "", 
time: new Date(1436465700387), what: "moveChunk.to", ns: "db54.coll54", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 40, step 2 of 5: 19, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.440-0400 m31100| 2015-07-09T14:15:00.440-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.441-0400 m31100| 2015-07-09T14:15:00.440-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eba24ca4787b9985d1e32 through { _id: MinKey } -> { _id: 0 } for collection 'db54.coll54' [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.443-0400 m31100| 2015-07-09T14:15:00.442-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.442-0400-559eba24792e00bb67274a29", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465700442), what: "moveChunk.commit", ns: "db54.coll54", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.496-0400 m31100| 2015-07-09T14:15:00.496-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.496-0400 m31100| 2015-07-09T14:15:00.496-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.497-0400 m31100| 2015-07-09T14:15:00.496-0400 I SHARDING [conn15] Deleter starting delete for: db54.coll54 from { _id: 0 } -> { _id: MaxKey }, with opId: 135980 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.497-0400 m31100| 2015-07-09T14:15:00.496-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db54.coll54 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.497-0400 m31100| 2015-07-09T14:15:00.496-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.498-0400 m31100| 2015-07-09T14:15:00.497-0400 I SHARDING [conn15] distributed lock 'db54.coll54/bs-osx108-8:31100:1436464536:197041335' unlocked. 
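
The sequence above is a complete donor-side chunk migration: the moveChunk request is accepted, the recipient shard (test-rs1) clones the range and commits, then the donor sets the collection version to 2|0, bumps its own remaining chunk to 2|1, and deletes the moved range inline because the request carried waitForDelete: true. The test harness drives this through its own helpers, but a minimal illustrative sketch of issuing the same migration by hand from a shell connected to the mongos, using the namespace, bounds, and shard names taken from the logged request, would look like:

    // Ask mongos to move the chunk [{ _id: 0 }, { _id: MaxKey }) to test-rs1.
    // With a hashed shard key, "bounds" addresses the chunk directly in
    // hashed-key space instead of hashing a "find" document.
    db.adminCommand({
        moveChunk: "db54.coll54",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
        to: "test-rs1",
        _waitForDelete: true   // mirrors waitForDelete: true in the logged request
    })
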
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.498-0400 m31100| 2015-07-09T14:15:00.497-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.497-0400-559eba24792e00bb67274a2a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465700497), what: "moveChunk.from", ns: "db54.coll54", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 111, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.551-0400 m31100| 2015-07-09T14:15:00.550-0400 I COMMAND [conn15] command db54.coll54 command: moveChunk { moveChunk: "db54.coll54", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba24ca4787b9985d1e32') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 300ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.553-0400 m30999| 2015-07-09T14:15:00.553-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db54.coll54: 0ms sequenceNumber: 242 version: 2|1||559eba24ca4787b9985d1e32 based on: 1|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.554-0400 m31100| 2015-07-09T14:15:00.554-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db54.coll54", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba24ca4787b9985d1e32') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.558-0400 m31100| 2015-07-09T14:15:00.558-0400 I SHARDING [conn15] distributed lock 'db54.coll54/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba24792e00bb67274a2b [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.558-0400 m31100| 2015-07-09T14:15:00.558-0400 I SHARDING [conn15] remotely refreshing metadata for db54.coll54 based on current shard version 2|0||559eba24ca4787b9985d1e32, current metadata version is 2|0||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.559-0400 m31100| 2015-07-09T14:15:00.559-0400 I SHARDING [conn15] updating metadata for db54.coll54 from shard version 2|0||559eba24ca4787b9985d1e32 to shard version 2|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.560-0400 m31100| 2015-07-09T14:15:00.559-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eba24ca4787b9985d1e32, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.560-0400 m31100| 2015-07-09T14:15:00.559-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.561-0400 m31100| 2015-07-09T14:15:00.560-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.560-0400-559eba24792e00bb67274a2c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new 
Date(1436465700560), what: "split", ns: "db54.coll54", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba24ca4787b9985d1e32') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba24ca4787b9985d1e32') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.617-0400 m31100| 2015-07-09T14:15:00.616-0400 I SHARDING [conn15] distributed lock 'db54.coll54/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.618-0400 m30999| 2015-07-09T14:15:00.618-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db54.coll54: 0ms sequenceNumber: 243 version: 2|3||559eba24ca4787b9985d1e32 based on: 2|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.619-0400 m31200| 2015-07-09T14:15:00.619-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db54.coll54", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba24ca4787b9985d1e32') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.623-0400 m31200| 2015-07-09T14:15:00.622-0400 I SHARDING [conn18] distributed lock 'db54.coll54/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba24d5a107a5b9c0db59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.623-0400 m31200| 2015-07-09T14:15:00.622-0400 I SHARDING [conn18] remotely refreshing metadata for db54.coll54 based on current shard version 0|0||559eba24ca4787b9985d1e32, current metadata version is 1|1||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.624-0400 m31200| 2015-07-09T14:15:00.624-0400 I SHARDING [conn18] updating metadata for db54.coll54 from shard version 0|0||559eba24ca4787b9985d1e32 to shard version 2|0||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.624-0400 m31200| 2015-07-09T14:15:00.624-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba24ca4787b9985d1e32, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.625-0400 m31200| 2015-07-09T14:15:00.624-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.626-0400 m31200| 2015-07-09T14:15:00.625-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.625-0400-559eba24d5a107a5b9c0db5a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465700625), what: "split", ns: "db54.coll54", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba24ca4787b9985d1e32') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba24ca4787b9985d1e32') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.680-0400 m31200| 2015-07-09T14:15:00.679-0400 I SHARDING [conn18] distributed lock 'db54.coll54/bs-osx108-8:31200:1436464537:809424560' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.682-0400 m30999| 2015-07-09T14:15:00.681-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db54.coll54: 0ms sequenceNumber: 244 version: 2|5||559eba24ca4787b9985d1e32 based on: 2|3||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.684-0400 m30999| 2015-07-09T14:15:00.683-0400 I COMMAND [conn1] DROP: db54.coll54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.684-0400 m30999| 2015-07-09T14:15:00.684-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.684-0400-559eba24ca4787b9985d1e34", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465700684), what: "dropCollection.start", ns: "db54.coll54", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.742-0400 m30999| 2015-07-09T14:15:00.741-0400 I SHARDING [conn1] distributed lock 'db54.coll54/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba24ca4787b9985d1e35 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.743-0400 m31100| 2015-07-09T14:15:00.743-0400 I COMMAND [conn15] CMD: drop db54.coll54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.746-0400 m31200| 2015-07-09T14:15:00.746-0400 I COMMAND [conn18] CMD: drop db54.coll54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.747-0400 m31101| 2015-07-09T14:15:00.747-0400 I COMMAND [repl writer worker 10] CMD: drop db54.coll54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.747-0400 m31102| 2015-07-09T14:15:00.747-0400 I COMMAND [repl writer worker 4] CMD: drop db54.coll54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.750-0400 m31201| 2015-07-09T14:15:00.749-0400 I COMMAND [repl writer worker 9] CMD: drop db54.coll54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.750-0400 m31202| 2015-07-09T14:15:00.750-0400 I COMMAND [repl writer worker 10] CMD: drop db54.coll54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.802-0400 m31100| 2015-07-09T14:15:00.801-0400 I SHARDING [conn15] remotely refreshing metadata for db54.coll54 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba24ca4787b9985d1e32, current metadata version is 2|3||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.803-0400 m31100| 2015-07-09T14:15:00.803-0400 W SHARDING [conn15] no chunks found when reloading db54.coll54, previous version was 0|0||559eba24ca4787b9985d1e32, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.804-0400 m31100| 2015-07-09T14:15:00.803-0400 I SHARDING [conn15] dropping metadata for db54.coll54 at shard version 2|3||559eba24ca4787b9985d1e32, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.805-0400 m31200| 2015-07-09T14:15:00.804-0400 I SHARDING [conn18] remotely refreshing metadata for db54.coll54 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba24ca4787b9985d1e32, current metadata version is 2|5||559eba24ca4787b9985d1e32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.806-0400 m31200| 2015-07-09T14:15:00.806-0400 W SHARDING [conn18] no chunks found when reloading db54.coll54, previous version was 0|0||559eba24ca4787b9985d1e32, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.806-0400 m31200| 2015-07-09T14:15:00.806-0400 I SHARDING [conn18] dropping metadata for db54.coll54 at shard version 2|5||559eba24ca4787b9985d1e32, took 1ms 
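
The drop of the sharded collection fans out exactly as the log shows: the mongos takes the distributed lock 'db54.coll54/...', each shard primary executes the drop (m31100, m31200), the secondaries replay it from the oplog (m31101/m31102, m31201/m31202), and each shard then discards its sharding metadata for the namespace. From a client this is the single usual call; a sketch against the mongos:

    // Dropping through mongos removes the data on every shard and the
    // collection's chunk metadata on the config servers.
    db.getSiblingDB("db54").coll54.drop()
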
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.807-0400 m30999| 2015-07-09T14:15:00.807-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:00.807-0400-559eba24ca4787b9985d1e36", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465700807), what: "dropCollection", ns: "db54.coll54", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.861-0400 m30999| 2015-07-09T14:15:00.860-0400 I SHARDING [conn1] distributed lock 'db54.coll54/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.915-0400 m30999| 2015-07-09T14:15:00.915-0400 I SHARDING [conn1] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.915-0400 m30999| 2015-07-09T14:15:00.915-0400 I SHARDING [conn1] retrying command: { create: "coll54", capped: true, size: 16384.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.916-0400 m31100| 2015-07-09T14:15:00.915-0400 I NETWORK [conn56] end connection 127.0.0.1:62746 (110 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.935-0400 m31100| 2015-07-09T14:15:00.935-0400 I INDEX [conn183] build index on: db54.coll54 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.935-0400 m31100| 2015-07-09T14:15:00.935-0400 I INDEX [conn183] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.946-0400 m31100| 2015-07-09T14:15:00.945-0400 I INDEX [conn183] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:00.947-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.092-0400 m31101| 2015-07-09T14:15:01.091-0400 I INDEX [repl writer worker 3] build index on: db54.coll54 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.092-0400 m31101| 2015-07-09T14:15:01.092-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.118-0400 m31102| 2015-07-09T14:15:01.117-0400 I INDEX [repl writer worker 9] build index on: db54.coll54 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db54.coll54" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.118-0400 m31102| 2015-07-09T14:15:01.117-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.126-0400 m30999| 2015-07-09T14:15:01.123-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63783 #345 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.165-0400 m31101| 2015-07-09T14:15:01.164-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.175-0400 m31102| 2015-07-09T14:15:01.174-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.193-0400 m30999| 2015-07-09T14:15:01.193-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63784 #346 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.194-0400 m30998| 2015-07-09T14:15:01.194-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63785 #344 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.203-0400 m30999| 2015-07-09T14:15:01.203-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63786 #347 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.204-0400 m30998| 2015-07-09T14:15:01.204-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63787 #345 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.211-0400 m30999| 2015-07-09T14:15:01.211-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63789 #348 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.211-0400 m30999| 2015-07-09T14:15:01.211-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63790 #349 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.214-0400 m30998| 2015-07-09T14:15:01.214-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63788 #346 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.216-0400 m30998| 2015-07-09T14:15:01.216-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63791 #347 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.223-0400 m30999| 2015-07-09T14:15:01.223-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63792 #350 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.224-0400 m30998| 2015-07-09T14:15:01.223-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63793 #348 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.230-0400 m30999| 2015-07-09T14:15:01.230-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63794 #351 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.230-0400 m30999| 2015-07-09T14:15:01.230-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63797 #352 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.233-0400 m30998| 2015-07-09T14:15:01.232-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63795 #349 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.233-0400 m30998| 2015-07-09T14:15:01.233-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63796 #350 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.233-0400 m30998| 2015-07-09T14:15:01.233-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63799 #351 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.249-0400 m30999| 2015-07-09T14:15:01.249-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63800 #353 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.250-0400 m30998| 2015-07-09T14:15:01.249-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63801 #352 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.250-0400 m30998| 2015-07-09T14:15:01.249-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63802 #353 (11 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.256-0400 m30999| 2015-07-09T14:15:01.251-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63803 #354 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.260-0400 setting random seed: 9566991818137 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.261-0400 setting random seed: 8002193006686 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.261-0400 setting random seed: 7464991654269 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.262-0400 setting random seed: 7304598391056 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.263-0400 setting random seed: 4215889568440 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.263-0400 setting random seed: 9320033332332 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.263-0400 setting random seed: 6456009773537 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.271-0400 setting random seed: 5698387413285 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.272-0400 setting random seed: 319390860386 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.272-0400 setting random seed: 3162521985359 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.275-0400 setting random seed: 2797299688681 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.279-0400 setting random seed: 2431562310084 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.280-0400 setting random seed: 1800289461389 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.280-0400 setting random seed: 6156314369291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.281-0400 setting random seed: 7267764620482 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.282-0400 setting random seed: 9175652852281 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.301-0400 setting random seed: 8385215643793 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.307-0400 setting random seed: 2507032686844 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.312-0400 setting random seed: 2857926012948 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.313-0400 setting random seed: 4114791727624 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.394-0400 m31100| 2015-07-09T14:15:01.393-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63804 #188 (111 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.497-0400 m31100| 2015-07-09T14:15:01.496-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:110 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:3 reslen:119 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.499-0400 m31100| 2015-07-09T14:15:01.499-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:110 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.501-0400 m31100| 2015-07-09T14:15:01.500-0400 I QUERY [conn50] 
query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:110 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:4 reslen:152 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.502-0400 m31100| 2015-07-09T14:15:01.501-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:110 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:2 reslen:86 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.503-0400 m31100| 2015-07-09T14:15:01.503-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:110 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.505-0400 m31100| 2015-07-09T14:15:01.504-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:110 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:4 reslen:152 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.506-0400 m31100| 2015-07-09T14:15:01.505-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:113 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:4 reslen:152 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.507-0400 m31100| 2015-07-09T14:15:01.506-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:113 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.507-0400 m31100| 2015-07-09T14:15:01.506-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:113 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.507-0400 m31100| 2015-07-09T14:15:01.507-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:113 
cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.508-0400 m31100| 2015-07-09T14:15:01.508-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:111 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.508-0400 m31100| 2015-07-09T14:15:01.508-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:111 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.509-0400 m31100| 2015-07-09T14:15:01.509-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:111 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:4 reslen:152 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.511-0400 m31100| 2015-07-09T14:15:01.509-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:113 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:1 reslen:53 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.638-0400 m31100| 2015-07-09T14:15:01.637-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:3 reslen:119 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.639-0400 m31100| 2015-07-09T14:15:01.639-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.644-0400 m31100| 2015-07-09T14:15:01.643-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { 
r: 7 } }, Collection: { acquireCount: { r: 7 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.644-0400 m31100| 2015-07-09T14:15:01.643-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.645-0400 m31100| 2015-07-09T14:15:01.644-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.645-0400 m31100| 2015-07-09T14:15:01.644-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.645-0400 m31100| 2015-07-09T14:15:01.645-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.646-0400 m31100| 2015-07-09T14:15:01.645-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.647-0400 m31100| 2015-07-09T14:15:01.646-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:133 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.657-0400 m31100| 2015-07-09T14:15:01.656-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:134 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.657-0400 m31100| 
2015-07-09T14:15:01.656-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:134 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.658-0400 m31100| 2015-07-09T14:15:01.657-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:134 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:2 reslen:86 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.663-0400 m31100| 2015-07-09T14:15:01.663-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:134 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, Collection: { acquireCount: { r: 6 } } } 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.696-0400 m31100| 2015-07-09T14:15:01.695-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:146 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:4 reslen:152 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.710-0400 m31100| 2015-07-09T14:15:01.709-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:146 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.719-0400 m31100| 2015-07-09T14:15:01.718-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:147 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.726-0400 m31100| 2015-07-09T14:15:01.725-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:149 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.732-0400 m31100| 2015-07-09T14:15:01.731-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 
ntoskip:0 nscanned:0 nscannedObjects:147 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.747-0400 m31100| 2015-07-09T14:15:01.747-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:150 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:6 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 14 } }, Database: { acquireCount: { r: 7 } }, Collection: { acquireCount: { r: 7 } } } 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.767-0400 m31100| 2015-07-09T14:15:01.766-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:152 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.800-0400 m31100| 2015-07-09T14:15:01.800-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:153 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:4 reslen:152 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.816-0400 m31100| 2015-07-09T14:15:01.816-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:153 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 164ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.820-0400 m31100| 2015-07-09T14:15:01.819-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:153 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.820-0400 m31100| 2015-07-09T14:15:01.820-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:153 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 166ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.821-0400 m31100| 2015-07-09T14:15:01.820-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:153 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:6 reslen:218 locks:{ Global: { acquireCount: 
{ r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 166ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.823-0400 m31100| 2015-07-09T14:15:01.822-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:155 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 167ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.823-0400 m31100| 2015-07-09T14:15:01.823-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:155 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 168ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.824-0400 m31100| 2015-07-09T14:15:01.823-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:155 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 167ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.830-0400 m31100| 2015-07-09T14:15:01.829-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:154 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 170ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.832-0400 m31100| 2015-07-09T14:15:01.832-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:154 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 168ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.836-0400 m31100| 2015-07-09T14:15:01.835-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:161 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 171ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.837-0400 m31100| 2015-07-09T14:15:01.836-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:161 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:3 reslen:119 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 171ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:01.839-0400 m31100| 2015-07-09T14:15:01.838-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:155 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 166ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.877-0400 m31100| 2015-07-09T14:15:01.877-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:166 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.897-0400 m31100| 2015-07-09T14:15:01.897-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:167 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.904-0400 m31100| 2015-07-09T14:15:01.904-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:167 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 181ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.915-0400 m31100| 2015-07-09T14:15:01.914-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:168 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 180ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.920-0400 m31100| 2015-07-09T14:15:01.919-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:168 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 181ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.930-0400 m31100| 2015-07-09T14:15:01.929-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:170 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.949-0400 m31100| 2015-07-09T14:15:01.948-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } 
planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:172 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.980-0400 m31100| 2015-07-09T14:15:01.979-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:173 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.998-0400 m31100| 2015-07-09T14:15:01.997-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:174 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:01.999-0400 m31100| 2015-07-09T14:15:01.999-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:174 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.000-0400 m31100| 2015-07-09T14:15:01.999-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:174 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.000-0400 m31100| 2015-07-09T14:15:02.000-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:174 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.001-0400 m31100| 2015-07-09T14:15:02.000-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:174 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 171ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.002-0400 m31100| 2015-07-09T14:15:02.001-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:174 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:12 
reslen:416 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 171ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.005-0400 m31100| 2015-07-09T14:15:02.004-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:174 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:8 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 18 } }, Database: { acquireCount: { r: 9 } }, Collection: { acquireCount: { r: 9 } } } 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.033-0400 m31100| 2015-07-09T14:15:02.033-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:181 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 196ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.034-0400 m31100| 2015-07-09T14:15:02.033-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:181 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.041-0400 m31100| 2015-07-09T14:15:02.040-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:181 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 198ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.041-0400 m31100| 2015-07-09T14:15:02.040-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:181 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:4 reslen:152 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.044-0400 m31100| 2015-07-09T14:15:02.044-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:183 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 200ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.084-0400 m31100| 2015-07-09T14:15:02.083-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:186 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 199ms 
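
The slow-query lines that follow the worker start-up are all the same shape: a filter on x combined with an orderby of { $natural: 1.0 }. A $natural sort asks for insertion order, so the { x: 1 } index built on the recreated capped collection cannot be used and every query runs as a COLLSCAN; nscannedObjects climbs (110, 133, 153, 174, 194, ...) as the 20 worker threads keep inserting. One of these queries as it would be issued from the shell, with values copied from the log:

    // Filtered scan of the capped collection in insertion order; the $natural
    // ordering rules out the x_1 index, hence planSummary: COLLSCAN.
    db.getSiblingDB("db54").coll54.find({ x: 9 }).sort({ $natural: 1 }).toArray()
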
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.112-0400 m31100| 2015-07-09T14:15:02.111-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:187 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 206ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.121-0400 m31100| 2015-07-09T14:15:02.120-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:187 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:9 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 20 } }, Database: { acquireCount: { r: 10 } }, Collection: { acquireCount: { r: 10 } } } 207ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.134-0400 m31100| 2015-07-09T14:15:02.134-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:189 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 214ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.145-0400 m31100| 2015-07-09T14:15:02.145-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:189 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 220ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.158-0400 m31100| 2015-07-09T14:15:02.158-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:190 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 221ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.176-0400 m31100| 2015-07-09T14:15:02.175-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:192 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 218ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.218-0400 m31100| 2015-07-09T14:15:02.217-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:193 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 230ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.249-0400 m31100| 2015-07-09T14:15:02.249-0400 I QUERY [conn184] query 
db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:194 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 247ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.255-0400 m31100| 2015-07-09T14:15:02.255-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:194 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 249ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.256-0400 m31100| 2015-07-09T14:15:02.255-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:194 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 250ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.257-0400 m31100| 2015-07-09T14:15:02.256-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:194 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 249ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.257-0400 m31100| 2015-07-09T14:15:02.256-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:194 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 249ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.258-0400 m31100| 2015-07-09T14:15:02.257-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:194 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 249ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.259-0400 m31100| 2015-07-09T14:15:02.258-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:194 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 250ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.320-0400 m31100| 2015-07-09T14:15:02.320-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 
nscannedObjects:201 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 281ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.322-0400 m31100| 2015-07-09T14:15:02.322-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:201 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:12 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 281ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.331-0400 m31100| 2015-07-09T14:15:02.331-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:201 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 284ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.336-0400 m31100| 2015-07-09T14:15:02.335-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:201 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:5 reslen:185 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 287ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.338-0400 m31100| 2015-07-09T14:15:02.337-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:201 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 288ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.377-0400 m31100| 2015-07-09T14:15:02.376-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:206 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 284ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.399-0400 m31100| 2015-07-09T14:15:02.398-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:206 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:12 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 279ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.419-0400 m31100| 2015-07-09T14:15:02.418-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:208 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:13 reslen:449 locks:{ Global: { acquireCount: 
{ r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 291ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.446-0400 m31100| 2015-07-09T14:15:02.445-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:209 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 303ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.474-0400 m31100| 2015-07-09T14:15:02.473-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:210 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 320ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.492-0400 m31100| 2015-07-09T14:15:02.492-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:211 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 326ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.499-0400 m31100| 2015-07-09T14:15:02.498-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:211 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 315ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.537-0400 m31100| 2015-07-09T14:15:02.536-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:213 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 311ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.581-0400 m31100| 2015-07-09T14:15:02.580-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:214 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 323ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.585-0400 m31100| 2015-07-09T14:15:02.585-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:214 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 323ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.587-0400 m31100| 2015-07-09T14:15:02.586-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:214 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 322ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.587-0400 m31100| 2015-07-09T14:15:02.586-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:214 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 322ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.587-0400 m31100| 2015-07-09T14:15:02.587-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:214 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 322ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.588-0400 m31100| 2015-07-09T14:15:02.587-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:214 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 323ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.590-0400 m31100| 2015-07-09T14:15:02.590-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:214 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 324ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.660-0400 m31100| 2015-07-09T14:15:02.660-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:221 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 332ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.661-0400 m31100| 2015-07-09T14:15:02.660-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:221 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 333ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.686-0400 m31100| 2015-07-09T14:15:02.686-0400 I QUERY [conn45] query 
db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:223 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:6 reslen:218 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 346ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.693-0400 m31100| 2015-07-09T14:15:02.693-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:223 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 352ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.697-0400 m31100| 2015-07-09T14:15:02.696-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:223 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 348ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.731-0400 m31100| 2015-07-09T14:15:02.730-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:226 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 345ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.755-0400 m31100| 2015-07-09T14:15:02.755-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:227 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 345ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.764-0400 m31100| 2015-07-09T14:15:02.764-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:227 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 336ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.791-0400 m31100| 2015-07-09T14:15:02.791-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:229 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 332ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.802-0400 m31100| 2015-07-09T14:15:02.802-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 
nscannedObjects:229 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 321ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.835-0400 m31100| 2015-07-09T14:15:02.834-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:231 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 336ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.847-0400 m31100| 2015-07-09T14:15:02.846-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:231 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 342ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.890-0400 m31100| 2015-07-09T14:15:02.890-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:233 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 342ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.930-0400 m31100| 2015-07-09T14:15:02.929-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:234 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 339ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.930-0400 m31100| 2015-07-09T14:15:02.930-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:234 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 339ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.931-0400 m31100| 2015-07-09T14:15:02.930-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:234 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 338ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.931-0400 m31100| 2015-07-09T14:15:02.930-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:234 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:12 reslen:416 locks:{ Global: { 
acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 338ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.932-0400 m31100| 2015-07-09T14:15:02.931-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:234 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 338ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.933-0400 m31100| 2015-07-09T14:15:02.933-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:234 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 340ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:02.936-0400 m31100| 2015-07-09T14:15:02.935-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:234 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:15 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 32 } }, Database: { acquireCount: { r: 16 } }, Collection: { acquireCount: { r: 16 } } } 340ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.030-0400 m31100| 2015-07-09T14:15:03.030-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:241 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 362ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.031-0400 m31100| 2015-07-09T14:15:03.030-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:241 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 362ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.054-0400 m31100| 2015-07-09T14:15:03.053-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:243 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:7 reslen:251 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 358ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.061-0400 m31100| 2015-07-09T14:15:03.060-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:243 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 362ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.066-0400 m31100| 2015-07-09T14:15:03.066-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:244 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 363ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.104-0400 m31100| 2015-07-09T14:15:03.103-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:246 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 364ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.123-0400 m31100| 2015-07-09T14:15:03.122-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:246 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 360ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.135-0400 m31100| 2015-07-09T14:15:03.135-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:247 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 362ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.159-0400 m31100| 2015-07-09T14:15:03.158-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:249 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 361ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.186-0400 m31100| 2015-07-09T14:15:03.186-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:250 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 372ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.216-0400 m31100| 2015-07-09T14:15:03.216-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:251 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 375ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.229-0400 m31100| 2015-07-09T14:15:03.228-0400 I QUERY [conn47] query 
db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:251 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 375ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.282-0400 m31100| 2015-07-09T14:15:03.281-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:253 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 381ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.320-0400 m31100| 2015-07-09T14:15:03.320-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:254 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 385ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.323-0400 m31100| 2015-07-09T14:15:03.322-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:254 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.323-0400 m31100| 2015-07-09T14:15:03.322-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:254 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.324-0400 m31100| 2015-07-09T14:15:03.323-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:254 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.324-0400 m31100| 2015-07-09T14:15:03.323-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:254 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.325-0400 m31100| 2015-07-09T14:15:03.324-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 
nscannedObjects:254 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.326-0400 m31100| 2015-07-09T14:15:03.325-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:255 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 385ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.418-0400 m31100| 2015-07-09T14:15:03.418-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:261 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 381ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.419-0400 m31100| 2015-07-09T14:15:03.419-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:261 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 382ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.437-0400 m31100| 2015-07-09T14:15:03.436-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:261 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:8 reslen:284 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 375ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.446-0400 m31100| 2015-07-09T14:15:03.445-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:263 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 380ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.457-0400 m31100| 2015-07-09T14:15:03.456-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:264 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 382ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.498-0400 m31100| 2015-07-09T14:15:03.498-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:266 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:17 nreturned:10 reslen:350 locks:{ Global: { 
acquireCount: { r: 36 } }, Database: { acquireCount: { r: 18 } }, Collection: { acquireCount: { r: 18 } } } 387ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.527-0400 m31100| 2015-07-09T14:15:03.526-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:267 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 395ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.538-0400 m31100| 2015-07-09T14:15:03.537-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:267 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 395ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.574-0400 m31100| 2015-07-09T14:15:03.574-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:269 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 406ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.602-0400 m31100| 2015-07-09T14:15:03.602-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:270 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 408ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.638-0400 m31100| 2015-07-09T14:15:03.637-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:271 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 412ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.650-0400 m31100| 2015-07-09T14:15:03.649-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:271 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 414ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.704-0400 m31100| 2015-07-09T14:15:03.704-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:273 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 414ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.747-0400 m31100| 2015-07-09T14:15:03.747-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:274 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 421ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.753-0400 m31100| 2015-07-09T14:15:03.753-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:274 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 424ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.754-0400 m31100| 2015-07-09T14:15:03.753-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:274 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 425ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.755-0400 m31100| 2015-07-09T14:15:03.754-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:274 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 424ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.756-0400 m31100| 2015-07-09T14:15:03.756-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:274 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 424ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.758-0400 m31100| 2015-07-09T14:15:03.757-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:274 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 426ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.758-0400 m31100| 2015-07-09T14:15:03.758-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:274 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 425ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.854-0400 m31100| 2015-07-09T14:15:03.853-0400 I QUERY [conn177] query 
db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:281 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 428ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.854-0400 m31100| 2015-07-09T14:15:03.854-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:281 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 429ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.869-0400 m31100| 2015-07-09T14:15:03.869-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:282 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:9 reslen:317 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 422ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.877-0400 m31100| 2015-07-09T14:15:03.876-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:283 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 421ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.889-0400 m31100| 2015-07-09T14:15:03.889-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:284 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 422ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.936-0400 m31100| 2015-07-09T14:15:03.936-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:286 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 430ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.961-0400 m31100| 2015-07-09T14:15:03.960-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:287 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 426ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:03.972-0400 m31100| 2015-07-09T14:15:03.971-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 
nscannedObjects:287 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 425ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.009-0400 m31100| 2015-07-09T14:15:04.009-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:289 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 427ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.039-0400 m31100| 2015-07-09T14:15:04.038-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:290 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 427ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.078-0400 m31100| 2015-07-09T14:15:04.077-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:291 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 429ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.086-0400 m31100| 2015-07-09T14:15:04.086-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:291 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 430ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.154-0400 m31100| 2015-07-09T14:15:04.153-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:293 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 441ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.202-0400 m31100| 2015-07-09T14:15:04.201-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:294 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 446ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.212-0400 m31100| 2015-07-09T14:15:04.211-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:294 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:18 reslen:614 locks:{ Global: { 
acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 453ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.213-0400 m31100| 2015-07-09T14:15:04.212-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:294 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 454ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.213-0400 m31100| 2015-07-09T14:15:04.212-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:294 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 454ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.215-0400 m31100| 2015-07-09T14:15:04.214-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:294 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 453ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.216-0400 m31100| 2015-07-09T14:15:04.215-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:294 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 453ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.216-0400 m31100| 2015-07-09T14:15:04.215-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:294 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:20 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 42 } }, Database: { acquireCount: { r: 21 } }, Collection: { acquireCount: { r: 21 } } } 453ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.347-0400 m31100| 2015-07-09T14:15:04.346-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:301 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 484ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.348-0400 m31100| 2015-07-09T14:15:04.347-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:301 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 484ms 
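Every QUERY line in this stretch records the same read pattern: an equality match on x combined with an { $natural: 1.0 } sort. $natural orders by insertion order and cannot be satisfied by an index, so each query is planned as COLLSCAN and scans the whole collection; because concurrent threads keep inserting, nscannedObjects climbs steadily across this excerpt (172 up to ~323) and the reported duration grows with it (~170ms up to ~490ms). The lock counters follow from the yields: each entry shows Database and Collection read acquireCounts of numYields + 1 (one initial acquisition plus one reacquisition per yield) and a Global acquireCount of exactly twice that. Below is a minimal mongo shell sketch of the read as reconstructed from these lines, not the FSM workload's own code (which is not shown in this excerpt); the direct connection to the shard on port 31100, the localhost hostname, and the particular x value are illustrative assumptions.

// Hypothetical reconstruction of the logged read; db54.coll54, the equality
// predicate on x, and the $natural sort are taken verbatim from the QUERY
// lines above, while the host/port and x value are assumptions.
var db54 = connect("localhost:31100/db54");
// $natural sorts by insertion order, so no index can cover it: the planner
// falls back to a collection scan (planSummary: COLLSCAN), which is why
// nscannedObjects tracks the growing collection size on every execution.
db54.coll54.find({ x: 13.0 }).sort({ $natural: 1 }).toArray();

These statements appear in the log because each exceeds the server's default 100ms slow-operation threshold; the steady growth in duration here reflects the collection growing under concurrent inserts rather than any change in the queries themselves.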
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.358-0400 m31100| 2015-07-09T14:15:04.358-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:303 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:10 reslen:350 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 482ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.362-0400 m31100| 2015-07-09T14:15:04.362-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:301 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 478ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.372-0400 m31100| 2015-07-09T14:15:04.372-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:303 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 476ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.437-0400 m31100| 2015-07-09T14:15:04.436-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:306 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 492ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.462-0400 m31100| 2015-07-09T14:15:04.461-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:307 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 491ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.471-0400 m31100| 2015-07-09T14:15:04.470-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:307 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 490ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.512-0400 m31100| 2015-07-09T14:15:04.511-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:309 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 494ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.543-0400 m31100| 2015-07-09T14:15:04.543-0400 I QUERY [conn73] query 
db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:310 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 497ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.572-0400 m30999| 2015-07-09T14:15:04.571-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:04.569-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.581-0400 m31100| 2015-07-09T14:15:04.581-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:311 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 497ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.590-0400 m31100| 2015-07-09T14:15:04.589-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:311 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 497ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.651-0400 m31100| 2015-07-09T14:15:04.651-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:313 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 488ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.697-0400 m31100| 2015-07-09T14:15:04.697-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:314 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 486ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.706-0400 m31100| 2015-07-09T14:15:04.705-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:314 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 489ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.918-0400 m31100| 2015-07-09T14:15:04.706-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:314 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 46 } }, 
Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 489ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.918-0400 m31100| 2015-07-09T14:15:04.706-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:314 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 486ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.918-0400 m31100| 2015-07-09T14:15:04.707-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:314 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 487ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.919-0400 m31100| 2015-07-09T14:15:04.708-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:314 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 486ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.919-0400 m31100| 2015-07-09T14:15:04.708-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:314 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 486ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.919-0400 m31100| 2015-07-09T14:15:04.829-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:321 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 474ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.920-0400 m31100| 2015-07-09T14:15:04.829-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:321 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 475ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.920-0400 m31100| 2015-07-09T14:15:04.850-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:323 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:11 reslen:383 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 485ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:04.920-0400 m31100| 2015-07-09T14:15:04.858-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:323 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 489ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.921-0400 m31100| 2015-07-09T14:15:04.864-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:323 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 483ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.921-0400 m31100| 2015-07-09T14:15:04.921-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:326 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 476ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.946-0400 m31100| 2015-07-09T14:15:04.946-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:327 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 477ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.959-0400 m31100| 2015-07-09T14:15:04.958-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:328 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 480ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:04.990-0400 m31100| 2015-07-09T14:15:04.990-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:329 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 472ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.023-0400 m31100| 2015-07-09T14:15:05.023-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:330 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 471ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.063-0400 m31100| 2015-07-09T14:15:05.062-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, 
orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:331 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 475ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.072-0400 m31100| 2015-07-09T14:15:05.072-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:331 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 476ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.129-0400 m31100| 2015-07-09T14:15:05.129-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:333 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 469ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.176-0400 m31100| 2015-07-09T14:15:05.175-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:334 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:21 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 44 } }, Database: { acquireCount: { r: 22 } }, Collection: { acquireCount: { r: 22 } } } 472ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.194-0400 m31100| 2015-07-09T14:15:05.194-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:334 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 483ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.199-0400 m31100| 2015-07-09T14:15:05.198-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:334 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 487ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.200-0400 m31100| 2015-07-09T14:15:05.200-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:335 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 488ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.201-0400 m31100| 2015-07-09T14:15:05.200-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:335 cursorExhausted:1 
keyUpdates:0 writeConflicts:0 numYields:22 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 488ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.204-0400 m31100| 2015-07-09T14:15:05.203-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:334 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 490ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.204-0400 m31100| 2015-07-09T14:15:05.203-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:335 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 490ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.327-0400 m31100| 2015-07-09T14:15:05.326-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:341 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 491ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.328-0400 m31100| 2015-07-09T14:15:05.327-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:341 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 490ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.347-0400 m31100| 2015-07-09T14:15:05.347-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:343 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:12 reslen:416 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 488ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.356-0400 m31100| 2015-07-09T14:15:05.356-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:343 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 491ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.362-0400 m31100| 2015-07-09T14:15:05.361-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:343 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 46 } }, Database: { 
acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 492ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.415-0400 m31100| 2015-07-09T14:15:05.414-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:346 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 485ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.436-0400 m31100| 2015-07-09T14:15:05.435-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:347 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 482ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.449-0400 m31100| 2015-07-09T14:15:05.449-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:348 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 482ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.492-0400 m31100| 2015-07-09T14:15:05.491-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:349 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 492ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.526-0400 m31100| 2015-07-09T14:15:05.525-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:350 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 495ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.568-0400 m31100| 2015-07-09T14:15:05.568-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:351 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 496ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.581-0400 m31100| 2015-07-09T14:15:05.581-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:352 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 501ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:05.660-0400 m31100| 2015-07-09T14:15:05.659-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:353 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:24 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 50 } }, Database: { acquireCount: { r: 25 } }, Collection: { acquireCount: { r: 25 } } } 523ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.702-0400 m31100| 2015-07-09T14:15:05.701-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:354 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 517ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.712-0400 m31100| 2015-07-09T14:15:05.712-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:354 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.716-0400 m31100| 2015-07-09T14:15:05.715-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:354 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.716-0400 m31100| 2015-07-09T14:15:05.715-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:355 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.717-0400 m31100| 2015-07-09T14:15:05.716-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:355 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 509ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.718-0400 m31100| 2015-07-09T14:15:05.717-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:355 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 509ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.720-0400 m31100| 2015-07-09T14:15:05.719-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, 
orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:355 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.842-0400 m31100| 2015-07-09T14:15:05.841-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:361 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 508ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.843-0400 m31100| 2015-07-09T14:15:05.842-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:361 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 509ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.852-0400 m31100| 2015-07-09T14:15:05.852-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:363 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:13 reslen:449 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 497ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.865-0400 m31100| 2015-07-09T14:15:05.864-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:364 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 502ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.873-0400 m31100| 2015-07-09T14:15:05.873-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:364 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 506ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.930-0400 m31100| 2015-07-09T14:15:05.930-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:366 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 508ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.959-0400 m31100| 2015-07-09T14:15:05.958-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:367 cursorExhausted:1 
keyUpdates:0 writeConflicts:0 numYields:23 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 514ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.968-0400 m31100| 2015-07-09T14:15:05.968-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:367 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:05.982-0400 m30998| 2015-07-09T14:15:05.981-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:05.979-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.008-0400 m31100| 2015-07-09T14:15:06.007-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:369 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 507ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.039-0400 m31100| 2015-07-09T14:15:06.038-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:370 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 506ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.083-0400 m31100| 2015-07-09T14:15:06.082-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:371 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 505ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.102-0400 m31100| 2015-07-09T14:15:06.102-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:372 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 511ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.165-0400 m31100| 2015-07-09T14:15:06.164-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:373 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 498ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.216-0400 m31100| 
2015-07-09T14:15:06.216-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:374 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 506ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.231-0400 m31100| 2015-07-09T14:15:06.231-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:374 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.237-0400 m31100| 2015-07-09T14:15:06.236-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:374 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 513ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.237-0400 m31100| 2015-07-09T14:15:06.237-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:374 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 514ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.238-0400 m31100| 2015-07-09T14:15:06.237-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:375 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 514ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.238-0400 m31100| 2015-07-09T14:15:06.237-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:374 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 512ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.240-0400 m31100| 2015-07-09T14:15:06.239-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:374 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 512ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.398-0400 m31100| 2015-07-09T14:15:06.397-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } 
planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:381 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 549ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.398-0400 m31100| 2015-07-09T14:15:06.398-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:381 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 549ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.407-0400 m31100| 2015-07-09T14:15:06.406-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:381 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:14 reslen:482 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 546ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.412-0400 m31100| 2015-07-09T14:15:06.411-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:381 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:24 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 50 } }, Database: { acquireCount: { r: 25 } }, Collection: { acquireCount: { r: 25 } } } 540ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.423-0400 m31100| 2015-07-09T14:15:06.422-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:385 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 542ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.492-0400 m31100| 2015-07-09T14:15:06.491-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:386 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 550ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.521-0400 m31100| 2015-07-09T14:15:06.520-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:387 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 554ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.548-0400 m31100| 2015-07-09T14:15:06.547-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:388 cursorExhausted:1 keyUpdates:0 writeConflicts:0 
numYields:26 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 573ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.594-0400 m31100| 2015-07-09T14:15:06.593-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:389 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 578ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.628-0400 m31100| 2015-07-09T14:15:06.628-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:390 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 581ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.671-0400 m31100| 2015-07-09T14:15:06.670-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:391 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 581ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.689-0400 m31100| 2015-07-09T14:15:06.689-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:391 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 578ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.753-0400 m31100| 2015-07-09T14:15:06.753-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:393 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 580ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.806-0400 m31100| 2015-07-09T14:15:06.805-0400 I QUERY [conn188] query db54.coll54 query: { query: { x: 5.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:394 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 580ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.817-0400 m30999| 2015-07-09T14:15:06.816-0400 I NETWORK [conn345] end connection 127.0.0.1:63783 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.819-0400 m31100| 2015-07-09T14:15:06.819-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 
nscannedObjects:394 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 580ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.821-0400 m31100| 2015-07-09T14:15:06.820-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:394 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 579ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.823-0400 m31100| 2015-07-09T14:15:06.822-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:395 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 580ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.823-0400 m31100| 2015-07-09T14:15:06.822-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:395 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 579ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.824-0400 m31100| 2015-07-09T14:15:06.823-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:395 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 579ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.825-0400 m31100| 2015-07-09T14:15:06.824-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:396 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 581ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.897-0400 m31100| 2015-07-09T14:15:06.896-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:06.894-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:06.998-0400 m31100| 2015-07-09T14:15:06.997-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:401 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 591ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:07.005-0400 m31100| 2015-07-09T14:15:07.004-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:401 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 595ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.008-0400 m31100| 2015-07-09T14:15:07.008-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:401 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:15 reslen:515 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 596ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.014-0400 m31100| 2015-07-09T14:15:07.014-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:403 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 594ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.030-0400 m31100| 2015-07-09T14:15:07.029-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:405 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 597ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.087-0400 m31100| 2015-07-09T14:15:07.086-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:406 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 587ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.110-0400 m31100| 2015-07-09T14:15:07.109-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:407 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 577ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.130-0400 m31100| 2015-07-09T14:15:07.130-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:408 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 574ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.171-0400 m31100| 2015-07-09T14:15:07.170-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, 
orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:409 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 570ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.208-0400 m31100| 2015-07-09T14:15:07.207-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:410 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 571ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.254-0400 m31100| 2015-07-09T14:15:07.253-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:411 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 576ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.281-0400 m31100| 2015-07-09T14:15:07.280-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:412 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 583ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.310-0400 m31200| 2015-07-09T14:15:07.310-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:07.307-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.338-0400 m31100| 2015-07-09T14:15:07.338-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:413 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 579ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.425-0400 m31100| 2015-07-09T14:15:07.424-0400 I QUERY [conn186] query db54.coll54 query: { query: { x: 0.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:414 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 597ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.426-0400 m31100| 2015-07-09T14:15:07.426-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:414 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, 
Collection: { acquireCount: { r: 28 } } } 596ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.427-0400 m31100| 2015-07-09T14:15:07.426-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:414 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 597ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.428-0400 m31100| 2015-07-09T14:15:07.426-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:414 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 596ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.428-0400 m31100| 2015-07-09T14:15:07.426-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:414 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 595ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.429-0400 m31100| 2015-07-09T14:15:07.427-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:414 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 596ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.434-0400 m30998| 2015-07-09T14:15:07.434-0400 I NETWORK [conn346] end connection 127.0.0.1:63788 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.592-0400 m31100| 2015-07-09T14:15:07.592-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:420 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 586ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.600-0400 m31100| 2015-07-09T14:15:07.599-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:421 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 589ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.606-0400 m31100| 2015-07-09T14:15:07.605-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:421 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:16 reslen:548 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 591ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.616-0400 m31100| 2015-07-09T14:15:07.615-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:423 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 594ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.631-0400 m31100| 2015-07-09T14:15:07.630-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:424 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 593ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.686-0400 m31100| 2015-07-09T14:15:07.686-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:425 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 591ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.701-0400 m31100| 2015-07-09T14:15:07.700-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:426 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 584ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.722-0400 m31100| 2015-07-09T14:15:07.721-0400 I QUERY [conn57] query db54.coll54 query: { query: { x: 7.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:427 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:27 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 56 } }, Database: { acquireCount: { r: 28 } }, Collection: { acquireCount: { r: 28 } } } 583ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.730-0400 m30999| 2015-07-09T14:15:07.730-0400 I NETWORK [conn346] end connection 127.0.0.1:63784 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.757-0400 m31100| 2015-07-09T14:15:07.756-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:428 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 578ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.787-0400 m31100| 2015-07-09T14:15:07.786-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:429 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 571ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.822-0400 m31100| 2015-07-09T14:15:07.821-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:430 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 560ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.847-0400 m31100| 2015-07-09T14:15:07.846-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:431 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 558ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:07.906-0400 m31100| 2015-07-09T14:15:07.905-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:432 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 560ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.000-0400 m31100| 2015-07-09T14:15:07.999-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:433 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 567ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.000-0400 m31100| 2015-07-09T14:15:07.999-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:433 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 567ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.000-0400 m31100| 2015-07-09T14:15:07.999-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:433 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 566ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.001-0400 m31100| 2015-07-09T14:15:08.000-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:433 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 566ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.001-0400 m31100| 2015-07-09T14:15:08.001-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:433 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 564ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.155-0400 m31100| 2015-07-09T14:15:08.154-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:438 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 555ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.162-0400 m31100| 2015-07-09T14:15:08.161-0400 I QUERY [conn177] query db54.coll54 query: { query: { x: 12.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:438 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 555ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.166-0400 m31100| 2015-07-09T14:15:08.165-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:438 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:17 reslen:581 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 553ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.172-0400 m30998| 2015-07-09T14:15:08.172-0400 I NETWORK [conn347] end connection 127.0.0.1:63791 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.174-0400 m31100| 2015-07-09T14:15:08.173-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:440 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 549ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.192-0400 m31100| 2015-07-09T14:15:08.191-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:442 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 553ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.243-0400 m31100| 2015-07-09T14:15:08.243-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:443 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 550ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.255-0400 m31100| 2015-07-09T14:15:08.254-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:444 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 546ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.315-0400 m31100| 2015-07-09T14:15:08.314-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:445 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 549ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.346-0400 m31100| 2015-07-09T14:15:08.345-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:446 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 552ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.381-0400 m31100| 2015-07-09T14:15:08.380-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:447 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 551ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.399-0400 m31100| 2015-07-09T14:15:08.399-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:448 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 544ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.454-0400 m31100| 2015-07-09T14:15:08.453-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:449 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 540ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.550-0400 m31100| 2015-07-09T14:15:08.550-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:450 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 545ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.553-0400 m31100| 2015-07-09T14:15:08.552-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:450 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 547ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.553-0400 m31100| 2015-07-09T14:15:08.552-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:450 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 547ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.554-0400 m31100| 2015-07-09T14:15:08.553-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:450 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 547ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.554-0400 m31100| 2015-07-09T14:15:08.553-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:450 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 548ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.707-0400 m31100| 2015-07-09T14:15:08.706-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:455 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 544ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.723-0400 m31100| 2015-07-09T14:15:08.722-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:456 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:18 reslen:614 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 549ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.734-0400 m31100| 2015-07-09T14:15:08.733-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:456 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 555ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.762-0400 m31100| 2015-07-09T14:15:08.762-0400 I QUERY [conn180] query db54.coll54 query: { query: { x: 3.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:458 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:26 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 54 } }, Database: { acquireCount: { r: 27 } }, Collection: { acquireCount: { r: 27 } } } 563ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.771-0400 m30999| 2015-07-09T14:15:08.771-0400 I NETWORK [conn350] end connection 127.0.0.1:63792 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.807-0400 m31100| 2015-07-09T14:15:08.806-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:459 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 556ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.818-0400 m31100| 2015-07-09T14:15:08.817-0400 I QUERY [conn49] query db54.coll54 query: { query: { x: 2.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:459 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:25 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 52 } }, Database: { acquireCount: { r: 26 } }, Collection: { acquireCount: { r: 26 } } } 556ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.825-0400 m30998| 2015-07-09T14:15:08.825-0400 I NETWORK [conn344] end connection 127.0.0.1:63785 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.862-0400 m31100| 2015-07-09T14:15:08.862-0400 I QUERY [conn183] query db54.coll54 query: { query: { x: 19.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:461 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:24 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 50 } }, Database: { acquireCount: { r: 25 } }, Collection: { acquireCount: { r: 25 } } } 541ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.871-0400 m30999| 2015-07-09T14:15:08.871-0400 I NETWORK [conn349] end connection 127.0.0.1:63790 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.884-0400 m31100| 2015-07-09T14:15:08.884-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:461 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:24 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 50 } }, Database: { acquireCount: { r: 25 } }, Collection: { acquireCount: { r: 25 } } } 531ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.909-0400 m31100| 2015-07-09T14:15:08.908-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:463 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 521ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.926-0400 m31100| 2015-07-09T14:15:08.925-0400 I QUERY [conn47] query db54.coll54 query: { query: { x: 17.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:464 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:24 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 50 } }, Database: { acquireCount: { r: 25 } }, Collection: { acquireCount: { r: 25 } } } 519ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.935-0400 m30999| 2015-07-09T14:15:08.935-0400 I NETWORK [conn348] end connection 127.0.0.1:63789 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:08.973-0400 m31100| 2015-07-09T14:15:08.973-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:465 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:23 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 48 } }, Database: { acquireCount: { r: 24 } }, Collection: { acquireCount: { r: 24 } } } 512ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.041-0400 m31100| 2015-07-09T14:15:09.040-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:466 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 483ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.042-0400 m31100| 2015-07-09T14:15:09.040-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 15.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:466 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 481ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.042-0400 m31100| 2015-07-09T14:15:09.040-0400 I QUERY [conn184] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:466 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 482ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.043-0400 m31100| 2015-07-09T14:15:09.041-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:466 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 481ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.043-0400 m31100| 2015-07-09T14:15:09.042-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:466 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:22 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 46 } }, Database: { acquireCount: { r: 23 } }, Collection: { acquireCount: { r: 23 } } } 481ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.052-0400 m30999| 2015-07-09T14:15:09.052-0400 I NETWORK [conn352] end connection 127.0.0.1:63797 (5 connections now open)
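The COLLSCAN entries above all come from indexed_insert_base_capped.js worker threads reading db54.coll54 back in insertion order while other threads keep inserting. A minimal shell sketch of the read that produces these lines follows; it illustrates the logged query shape and is not the workload's actual source, with the host and port assumed from the m31100 prefix:

var conn = new Mongo("localhost:31100");
var coll = conn.getDB("db54").getCollection("coll54");
// Matches the logged plan: { query: { x: 15.0 }, orderby: { $natural: 1.0 } }.
// The $natural sort pins the plan to a forward scan of the capped collection,
// hence planSummary: COLLSCAN even where an index on x exists.
var docs = coll.find({ x: 15 }).sort({ $natural: 1 }).toArray();
// docs.length is the log's nreturned; nscannedObjects climbs from 414 toward
// 490 across entries because concurrent inserters keep appending documents,
// and numYields counts how often the scan yielded its locks along the way.
print(docs.length);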
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.139-0400 m31100| 2015-07-09T14:15:09.139-0400 I QUERY [conn58] query db54.coll54 query: { query: { x: 10.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:471 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 425ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.145-0400 m31100| 2015-07-09T14:15:09.145-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:471 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:19 nreturned:19 reslen:647 locks:{ Global: { acquireCount: { r: 40 } }, Database: { acquireCount: { r: 20 } }, Collection: { acquireCount: { r: 20 } } } 415ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.147-0400 m30998| 2015-07-09T14:15:09.147-0400 I NETWORK [conn349] end connection 127.0.0.1:63795 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.150-0400 m31100| 2015-07-09T14:15:09.149-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:471 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:18 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 38 } }, Database: { acquireCount: { r: 19 } }, Collection: { acquireCount: { r: 19 } } } 409ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.180-0400 m31100| 2015-07-09T14:15:09.179-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:474 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 365ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.259-0400 m31100| 2015-07-09T14:15:09.258-0400 I QUERY [conn73] query db54.coll54 query: { query: { x: 14.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:475 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 369ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.267-0400 m30998| 2015-07-09T14:15:09.266-0400 I NETWORK [conn345] end connection 127.0.0.1:63787 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.268-0400 m31100| 2015-07-09T14:15:09.266-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:475 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:16 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 34 } }, Database: { acquireCount: { r: 17 } }, Collection: { acquireCount: { r: 17 } } } 351ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.288-0400 m31100| 2015-07-09T14:15:09.288-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:477 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:14 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 30 } }, Database: { acquireCount: { r: 15 } }, Collection: { acquireCount: { r: 15 } } } 308ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.339-0400 m31100| 2015-07-09T14:15:09.339-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:478 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 291ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.340-0400 m31100| 2015-07-09T14:15:09.339-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:478 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 291ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.341-0400 m31100| 2015-07-09T14:15:09.339-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 8.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:478 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 290ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.341-0400 m31100| 2015-07-09T14:15:09.339-0400 I QUERY [conn181] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:478 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 291ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.346-0400 m30998| 2015-07-09T14:15:09.346-0400 I NETWORK [conn348] end connection 127.0.0.1:63793 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.382-0400 m31100| 2015-07-09T14:15:09.381-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:482 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:20 reslen:680 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 232ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.399-0400 m31100| 2015-07-09T14:15:09.398-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:483 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 242ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.425-0400 m31100| 2015-07-09T14:15:09.425-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:484 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 239ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.524-0400 m31100| 2015-07-09T14:15:09.523-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:485 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 252ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.556-0400 m31100| 2015-07-09T14:15:09.555-0400 I QUERY [conn182] query db54.coll54 query: { query: { x: 13.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:486 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:12 nreturned:22 reslen:746 locks:{ Global: { acquireCount: { r: 26 } }, Database: { acquireCount: { r: 13 } }, Collection: { acquireCount: { r: 13 } } } 262ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.634-0400 m31100| 2015-07-09T14:15:09.633-0400 I QUERY [conn185] query db54.coll54 query: { query: { x: 6.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:487 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 290ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.635-0400 m31100| 2015-07-09T14:15:09.634-0400 I QUERY [conn179] query db54.coll54 query: { query: { x: 1.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:487 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 290ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.635-0400 m31100| 2015-07-09T14:15:09.634-0400 I QUERY [conn178] query db54.coll54 query: { query: { x: 4.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:487 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:13 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 28 } }, Database: { acquireCount: { r: 14 } }, Collection: { acquireCount: { r: 14 } } } 290ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.640-0400 m31100| 2015-07-09T14:15:09.639-0400 I QUERY [conn45] query db54.coll54 query: { query: { x: 11.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:487 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:21 reslen:713 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 251ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.645-0400 m31100| 2015-07-09T14:15:09.644-0400 I QUERY [conn50] query db54.coll54 query: { query: { x: 16.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:487 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:11 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 24 } }, Database: { acquireCount: { r: 12 } }, Collection: { acquireCount: { r: 12 } } } 240ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.647-0400 m30998| 2015-07-09T14:15:09.646-0400 I NETWORK [conn350] end connection 127.0.0.1:63796 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.647-0400 m30998| 2015-07-09T14:15:09.646-0400 I NETWORK [conn351] end connection 127.0.0.1:63799 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.649-0400 m30999| 2015-07-09T14:15:09.646-0400 I NETWORK [conn347] end connection 127.0.0.1:63786 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.651-0400 m31100| 2015-07-09T14:15:09.649-0400 I QUERY [conn175] query db54.coll54 query: { query: { x: 9.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:490 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:10 nreturned:23 reslen:779 locks:{ Global: { acquireCount: { r: 22 } }, Database: { acquireCount: { r: 11 } }, Collection: { acquireCount: { r: 11 } } } 218ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.654-0400 m31100| 2015-07-09T14:15:09.653-0400 I QUERY [conn176] query db54.coll54 query: { query: { x: 18.0 }, orderby: { $natural: 1.0 } } planSummary: COLLSCAN ntoreturn:0 ntoskip:0 nscanned:0 nscannedObjects:490 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:7 nreturned:24 reslen:812 locks:{ Global: { acquireCount: { r: 16 } }, Database: { acquireCount: { r: 8 } }, Collection: { acquireCount: { r: 8 } } } 125ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.680-0400 m30998| 2015-07-09T14:15:09.679-0400 I NETWORK [conn353] end connection 127.0.0.1:63802 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.683-0400 m30998| 2015-07-09T14:15:09.683-0400 I NETWORK [conn352] end connection 127.0.0.1:63801 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.700-0400 m30999| 2015-07-09T14:15:09.700-0400 I NETWORK [conn351] end connection 127.0.0.1:63794 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.709-0400 m30999| 2015-07-09T14:15:09.709-0400 I NETWORK [conn354] end connection 127.0.0.1:63803 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.715-0400 m30999| 2015-07-09T14:15:09.715-0400 I NETWORK [conn353] end connection 127.0.0.1:63800 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.738-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.739-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.739-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.739-0400 jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js: Workload completed in 8792 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.739-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.739-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.739-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.740-0400 m30999| 2015-07-09T14:15:09.739-0400 I COMMAND [conn1] DROP: db54.coll54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.740-0400 m30999| 2015-07-09T14:15:09.739-0400 I COMMAND [conn1] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.740-0400 m31100| 2015-07-09T14:15:09.739-0400 I COMMAND [conn182] CMD: drop db54.coll54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.745-0400 m31101| 2015-07-09T14:15:09.745-0400 I COMMAND [repl writer worker 4] CMD: drop db54.coll54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.745-0400 m31102| 2015-07-09T14:15:09.744-0400 I COMMAND [repl writer worker 10] CMD: drop db54.coll54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.745-0400 m30999| 2015-07-09T14:15:09.745-0400 I COMMAND [conn1] DROP DATABASE: db54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.746-0400 m30999| 2015-07-09T14:15:09.745-0400 I SHARDING [conn1] DBConfig::dropDatabase: db54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.746-0400 m30999| 2015-07-09T14:15:09.745-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:09.745-0400-559eba2dca4787b9985d1e37", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465709745), what: "dropDatabase.start", ns: "db54", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.853-0400 m30999| 2015-07-09T14:15:09.852-0400 I SHARDING [conn1] DBConfig::dropDatabase: db54 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.853-0400 m31100| 2015-07-09T14:15:09.853-0400 I COMMAND [conn160] dropDatabase db54 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.853-0400 m31100| 2015-07-09T14:15:09.853-0400 I COMMAND [conn160] dropDatabase db54 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.854-0400 m30999| 2015-07-09T14:15:09.853-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:09.853-0400-559eba2dca4787b9985d1e38", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465709853), what: "dropDatabase", ns: "db54", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.854-0400 m31102| 2015-07-09T14:15:09.854-0400 I COMMAND [repl writer worker 12] dropDatabase db54 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.855-0400 m31102| 2015-07-09T14:15:09.854-0400 I COMMAND [repl writer worker 12] dropDatabase db54 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.855-0400 m31101| 2015-07-09T14:15:09.854-0400 I COMMAND [repl writer worker 12] dropDatabase db54 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.855-0400 m31101| 2015-07-09T14:15:09.854-0400 I COMMAND [repl writer worker 12] dropDatabase db54 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.945-0400 m31100| 2015-07-09T14:15:09.944-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.949-0400 m31101| 2015-07-09T14:15:09.949-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.950-0400 m31102| 2015-07-09T14:15:09.949-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.983-0400 m31200| 2015-07-09T14:15:09.983-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.986-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.986-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.986-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.986-0400 jstests/concurrency/fsm_workloads/map_reduce_reduce.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.987-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.987-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.987-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.987-0400 m31202| 2015-07-09T14:15:09.986-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.987-0400 m31201| 2015-07-09T14:15:09.987-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.994-0400 m30999| 2015-07-09T14:15:09.993-0400 I SHARDING [conn1] distributed lock 'db55/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba2dca4787b9985d1e39
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.998-0400 m30999| 2015-07-09T14:15:09.998-0400 I SHARDING [conn1] Placing [db55] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:09.998-0400 m30999| 2015-07-09T14:15:09.998-0400 I SHARDING [conn1] Enabling sharding for database [db55] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.053-0400 m30999| 2015-07-09T14:15:10.053-0400 I SHARDING [conn1] distributed lock 'db55/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.075-0400 m31100| 2015-07-09T14:15:10.075-0400 I INDEX [conn69] build index on: db55.coll55 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db55.coll55" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.075-0400 m31100| 2015-07-09T14:15:10.075-0400 I INDEX [conn69] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.085-0400 m31100| 2015-07-09T14:15:10.084-0400 I INDEX [conn69] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.087-0400 m30999| 2015-07-09T14:15:10.086-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db55.coll55", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.090-0400 m30999| 2015-07-09T14:15:10.090-0400 I SHARDING [conn1] distributed lock 'db55.coll55/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba2eca4787b9985d1e3a
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.091-0400 m30999| 2015-07-09T14:15:10.090-0400 I SHARDING [conn1] enable sharding on: db55.coll55 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.091-0400 m30999| 2015-07-09T14:15:10.090-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.090-0400-559eba2eca4787b9985d1e3b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465710090), what: "shardCollection.start", ns: "db55.coll55", details: { shardKey: { _id: "hashed" }, collection: "db55.coll55", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.093-0400 m31102| 2015-07-09T14:15:10.093-0400 I INDEX [repl writer worker 14] build index on: db55.coll55 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db55.coll55" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.094-0400 m31102| 2015-07-09T14:15:10.093-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.097-0400 m31101| 2015-07-09T14:15:10.097-0400 I INDEX [repl writer worker 14] build index on: db55.coll55 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db55.coll55" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.097-0400 m31101| 2015-07-09T14:15:10.097-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.102-0400 m31102| 2015-07-09T14:15:10.102-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.103-0400 m31101| 2015-07-09T14:15:10.102-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.143-0400 m30999| 2015-07-09T14:15:10.143-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db55.coll55 using new epoch 559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.251-0400 m30999| 2015-07-09T14:15:10.250-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db55.coll55: 0ms sequenceNumber: 245 version: 1|1||559eba2eca4787b9985d1e3c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.307-0400 m30999| 2015-07-09T14:15:10.306-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db55.coll55: 0ms sequenceNumber: 246 version: 1|1||559eba2eca4787b9985d1e3c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.308-0400 m31100| 2015-07-09T14:15:10.308-0400 I SHARDING [conn182] remotely refreshing metadata for db55.coll55 with requested shard version 1|1||559eba2eca4787b9985d1e3c, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.310-0400 m31100| 2015-07-09T14:15:10.309-0400 I SHARDING [conn182] collection db55.coll55 was previously unsharded, new metadata loaded with shard version 1|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.310-0400 m31100| 2015-07-09T14:15:10.309-0400 I SHARDING [conn182] collection version was loaded at version 1|1||559eba2eca4787b9985d1e3c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.310-0400 m30999| 2015-07-09T14:15:10.309-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.309-0400-559eba2eca4787b9985d1e3d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465710309), what: "shardCollection", ns: "db55.coll55", details: { version: "1|1||559eba2eca4787b9985d1e3c" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.365-0400 m30999| 2015-07-09T14:15:10.364-0400 I SHARDING [conn1] distributed lock 'db55.coll55/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.366-0400 m30999| 2015-07-09T14:15:10.365-0400 I SHARDING [conn1] moving chunk ns: db55.coll55 moving ( ns: db55.coll55, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.366-0400 m31100| 2015-07-09T14:15:10.366-0400 I SHARDING [conn15] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.367-0400 m31100| 2015-07-09T14:15:10.367-0400 I SHARDING [conn15] received moveChunk request: { moveChunk: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba2eca4787b9985d1e3c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.372-0400 m31100| 2015-07-09T14:15:10.371-0400 I SHARDING [conn15] distributed lock 'db55.coll55/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba2e792e00bb67274a2e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.372-0400 m31100| 2015-07-09T14:15:10.372-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.371-0400-559eba2e792e00bb67274a2f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465710372), what: "moveChunk.start", ns: "db55.coll55", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.425-0400 m31100| 2015-07-09T14:15:10.424-0400 I SHARDING [conn15] remotely refreshing metadata for db55.coll55 based on current shard version 1|1||559eba2eca4787b9985d1e3c, current metadata version is 1|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.427-0400 m31100| 2015-07-09T14:15:10.426-0400 I SHARDING [conn15] metadata of collection db55.coll55 already up to date (shard version : 1|1||559eba2eca4787b9985d1e3c, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.427-0400 m31100| 2015-07-09T14:15:10.426-0400 I SHARDING [conn15] moveChunk request accepted at version 1|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.427-0400 m31100| 2015-07-09T14:15:10.427-0400 I SHARDING [conn15] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.428-0400 m31200| 2015-07-09T14:15:10.428-0400 I SHARDING [conn16] remotely refreshing metadata for db55.coll55, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.429-0400 m31200| 2015-07-09T14:15:10.429-0400 I SHARDING [conn16] collection db55.coll55 was previously unsharded, new metadata loaded with shard version 0|0||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.430-0400 m31200| 2015-07-09T14:15:10.429-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba2eca4787b9985d1e3c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.430-0400 m31200| 2015-07-09T14:15:10.429-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db55.coll55 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.432-0400 m31100| 2015-07-09T14:15:10.431-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.436-0400 m31100| 2015-07-09T14:15:10.435-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.441-0400 m31100| 2015-07-09T14:15:10.440-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.448-0400 m31200| 2015-07-09T14:15:10.447-0400 I INDEX [migrateThread] build index on: db55.coll55 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db55.coll55" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.448-0400 m31200| 2015-07-09T14:15:10.448-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.450-0400 m31100| 2015-07-09T14:15:10.449-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.459-0400 m31200| 2015-07-09T14:15:10.458-0400 I INDEX [migrateThread] build index on: db55.coll55 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db55.coll55" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.459-0400 m31200| 2015-07-09T14:15:10.459-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.468-0400 m31100| 2015-07-09T14:15:10.467-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.470-0400 m31200| 2015-07-09T14:15:10.470-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.471-0400 m31200| 2015-07-09T14:15:10.471-0400 I SHARDING [migrateThread] Deleter starting delete for: db55.coll55 from { _id: 0 } -> { _id: MaxKey }, with opId: 89926
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.471-0400 m31200| 2015-07-09T14:15:10.471-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db55.coll55 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.480-0400 m31201| 2015-07-09T14:15:10.480-0400 I INDEX [repl writer worker 8] build index on: db55.coll55 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db55.coll55" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.480-0400 m31202| 2015-07-09T14:15:10.480-0400 I INDEX [repl writer worker 2] build index on: db55.coll55 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db55.coll55" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.481-0400 m31201| 2015-07-09T14:15:10.480-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.481-0400 m31202| 2015-07-09T14:15:10.480-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.485-0400 m31202| 2015-07-09T14:15:10.485-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.486-0400 m31201| 2015-07-09T14:15:10.485-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.486-0400 m31200| 2015-07-09T14:15:10.486-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.486-0400 m31200| 2015-07-09T14:15:10.486-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db55.coll55' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.502-0400 m31100| 2015-07-09T14:15:10.501-0400 I SHARDING [conn15] moveChunk data transfer progress: { active: true, ns: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.502-0400 m31100| 2015-07-09T14:15:10.502-0400 I SHARDING [conn15] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.503-0400 m31100| 2015-07-09T14:15:10.502-0400 I SHARDING [conn15] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.503-0400 m31100| 2015-07-09T14:15:10.502-0400 I SHARDING [conn15] moveChunk setting version to: 2|0||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.510-0400 m31200| 2015-07-09T14:15:10.509-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db55.coll55' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.510-0400 m31200| 2015-07-09T14:15:10.510-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.509-0400-559eba2ed5a107a5b9c0db5b", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465710510), what: "moveChunk.to", ns: "db55.coll55", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 41, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.563-0400 m31100| 2015-07-09T14:15:10.563-0400 I SHARDING [conn15] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.564-0400 m31100| 2015-07-09T14:15:10.563-0400 I SHARDING [conn15] moveChunk updating self version to: 2|1||559eba2eca4787b9985d1e3c through { _id: MinKey } -> { _id: 0 } for collection 'db55.coll55'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.565-0400 m31100| 2015-07-09T14:15:10.564-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.564-0400-559eba2e792e00bb67274a30", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465710564), what: "moveChunk.commit", ns: "db55.coll55", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.618-0400 m31100| 2015-07-09T14:15:10.618-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.618-0400 m31100| 2015-07-09T14:15:10.618-0400 I SHARDING [conn15] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.619-0400 m31100| 2015-07-09T14:15:10.618-0400 I SHARDING [conn15] Deleter starting delete for: db55.coll55 from { _id: 0 } -> { _id: MaxKey }, with opId: 138821
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.619-0400 m31100| 2015-07-09T14:15:10.618-0400 I SHARDING [conn15] rangeDeleter deleted 0 documents for db55.coll55 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.619-0400 m31100| 2015-07-09T14:15:10.618-0400 I SHARDING [conn15] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.620-0400 m31100| 2015-07-09T14:15:10.619-0400 I SHARDING [conn15] distributed lock 'db55.coll55/bs-osx108-8:31100:1436464536:197041335' unlocked.
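The sequence above is one complete chunk migration: the donor m31100 [conn15] takes the collection's distributed lock, the recipient m31200 [migrateThread] clones the (empty) range and builds its indexes, the donor polls "data transfer progress" until the state reaches "steady", enters the critical section and bumps the shard version to 2|0, both sides commit (moveChunk.to, then moveChunk.commit), and because the request carried waitForDelete: true the donor's rangeDeleter cleans the range inline before releasing the lock. A manual equivalent issued through mongos would look roughly like the sketch below; in this run the move is driven by the test's setup code, and option names are as of this 3.1.x vintage:

var mongos = new Mongo("localhost:30999");
mongos.getDB("admin").runCommand({
    moveChunk: "db55.coll55",
    bounds: [ { _id: 0 }, { _id: MaxKey } ], // the chunk logged above; hashed keys take bounds rather than find
    to: "test-rs1",
    _waitForDelete: true                     // mirrors waitForDelete: true in the logged request
});

The splitChunk request that follows, at splitKey { _id: -4611686018427387902 }, goes through the same distributed-lock and metadata-refresh dance on the donor shard.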
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.620-0400 m31100| 2015-07-09T14:15:10.620-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.620-0400-559eba2e792e00bb67274a31", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465710620), what: "moveChunk.from", ns: "db55.coll55", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 116, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.674-0400 m31100| 2015-07-09T14:15:10.673-0400 I COMMAND [conn15] command db55.coll55 command: moveChunk { moveChunk: "db55.coll55", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba2eca4787b9985d1e3c') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 307ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.676-0400 m30999| 2015-07-09T14:15:10.675-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db55.coll55: 0ms sequenceNumber: 247 version: 2|1||559eba2eca4787b9985d1e3c based on: 1|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.677-0400 m31100| 2015-07-09T14:15:10.676-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db55.coll55", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba2eca4787b9985d1e3c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.681-0400 m31100| 2015-07-09T14:15:10.681-0400 I SHARDING [conn15] distributed lock 'db55.coll55/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba2e792e00bb67274a32
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.682-0400 m31100| 2015-07-09T14:15:10.681-0400 I SHARDING [conn15] remotely refreshing metadata for db55.coll55 based on current shard version 2|0||559eba2eca4787b9985d1e3c, current metadata version is 2|0||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.684-0400 m31100| 2015-07-09T14:15:10.683-0400 I SHARDING [conn15] updating metadata for db55.coll55 from shard version 2|0||559eba2eca4787b9985d1e3c to shard version 2|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.684-0400 m31100| 2015-07-09T14:15:10.683-0400 I SHARDING [conn15] collection version was loaded at version 2|1||559eba2eca4787b9985d1e3c, took 2ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.684-0400 m31100| 2015-07-09T14:15:10.683-0400 I SHARDING [conn15] splitChunk accepted at version 2|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.685-0400 m31100| 2015-07-09T14:15:10.684-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.684-0400-559eba2e792e00bb67274a33", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465710684), what: "split", ns: "db55.coll55", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba2eca4787b9985d1e3c') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba2eca4787b9985d1e3c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.739-0400 m31100| 2015-07-09T14:15:10.739-0400 I SHARDING [conn15] distributed lock 'db55.coll55/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.741-0400 m30999| 2015-07-09T14:15:10.741-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db55.coll55: 0ms sequenceNumber: 248 version: 2|3||559eba2eca4787b9985d1e3c based on: 2|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.742-0400 m31200| 2015-07-09T14:15:10.741-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db55.coll55", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba2eca4787b9985d1e3c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.745-0400 m31200| 2015-07-09T14:15:10.745-0400 I SHARDING [conn18] distributed lock 'db55.coll55/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba2ed5a107a5b9c0db5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.746-0400 m31200| 2015-07-09T14:15:10.745-0400 I SHARDING [conn18] remotely refreshing metadata for db55.coll55 based on current shard version 0|0||559eba2eca4787b9985d1e3c, current metadata version is 1|1||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.747-0400 m31200| 2015-07-09T14:15:10.746-0400 I SHARDING [conn18] updating metadata for db55.coll55 from shard version 0|0||559eba2eca4787b9985d1e3c to shard version 2|0||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.747-0400 m31200| 2015-07-09T14:15:10.746-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba2eca4787b9985d1e3c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.747-0400 m31200| 2015-07-09T14:15:10.746-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.749-0400 m31200| 2015-07-09T14:15:10.749-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:10.749-0400-559eba2ed5a107a5b9c0db5d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465710749), what: "split", ns: "db55.coll55", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba2eca4787b9985d1e3c') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba2eca4787b9985d1e3c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.804-0400 m31200| 2015-07-09T14:15:10.803-0400 I SHARDING [conn18] distributed lock 'db55.coll55/bs-osx108-8:31200:1436464537:809424560' unlocked.
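The moveChunk command summary above (protocol:op_command 307ms) shows every argument the donor shard received. For reference, a sketch of the equivalent manual migration issued through a mongos; the find key mirrors the logged min bound, _waitForDelete is the mongos-level spelling of the waitForDelete option seen in the shard-side command, and everything else is filled in by mongos:

    // Hand-issued equivalent of the logged migration: move the chunk that
    // contains { _id: 0 } (i.e. { _id: 0 } -> { _id: MaxKey }) to test-rs1.
    var mongos = new Mongo('bs-osx108-8:30999');
    printjson(mongos.getDB('admin').runCommand({
        moveChunk: 'db55.coll55',
        find: { _id: 0 },
        to: 'test-rs1',
        _waitForDelete: true   // block until the donor's range deleter finishes
    }));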
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:10.805-0400 m30999| 2015-07-09T14:15:10.805-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db55.coll55: 0ms sequenceNumber: 249 version: 2|5||559eba2eca4787b9985d1e3c based on: 2|3||559eba2eca4787b9985d1e3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.082-0400 m31200| 2015-07-09T14:15:11.081-0400 I COMMAND [conn72] command db55.$cmd command: insert { insert: "coll55", documents: 506, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eba2eca4787b9985d1e3c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 514, w: 514 } }, Database: { acquireCount: { w: 514 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 506 } }, oplog: { acquireCount: { w: 506 } } } protocol:op_command 169ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.084-0400 m31100| 2015-07-09T14:15:11.083-0400 I COMMAND [conn69] command db55.$cmd command: insert { insert: "coll55", documents: 494, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eba2eca4787b9985d1e3c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 503, w: 503 } }, Database: { acquireCount: { w: 503 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 494 } }, oplog: { acquireCount: { w: 494 } } } protocol:op_command 171ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.258-0400 m31200| 2015-07-09T14:15:11.257-0400 I COMMAND [conn72] command db55.$cmd command: insert { insert: "coll55", documents: 493, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eba2eca4787b9985d1e3c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 501, w: 501 } }, Database: { acquireCount: { w: 501 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 493 } }, oplog: { acquireCount: { w: 493 } } } protocol:op_command 155ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.260-0400 m31100| 2015-07-09T14:15:11.260-0400 I COMMAND [conn69] command db55.$cmd command: insert { insert: "coll55", documents: 507, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eba2eca4787b9985d1e3c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 515, w: 515 } }, Database: { acquireCount: { w: 515 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 507 } }, oplog: { acquireCount: { w: 507 } } } protocol:op_command 158ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.261-0400 Using 5 threads (requested 5)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.304-0400 m30998| 2015-07-09T14:15:11.304-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63806 #354 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.309-0400 m30998| 2015-07-09T14:15:11.309-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63807 #355 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.316-0400 m30999| 2015-07-09T14:15:11.316-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63808 #355 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.319-0400 m30998| 2015-07-09T14:15:11.318-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63809 #356 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.322-0400 m30999| 2015-07-09T14:15:11.322-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63810 #356 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.331-0400 setting random seed: 2198303355835
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.332-0400 setting random seed: 6885492545552
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.332-0400 setting random seed: 6884518726728
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.332-0400 setting random seed: 4473266988061
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.332-0400 setting random seed: 8106294008903
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.335-0400 m30998| 2015-07-09T14:15:11.334-0400 I SHARDING [conn354] ChunkManager: time to load chunks for db55.coll55: 0ms sequenceNumber: 65 version: 2|5||559eba2eca4787b9985d1e3c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.456-0400 m31200| 2015-07-09T14:15:11.453-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.456-0400 m31100| 2015-07-09T14:15:11.455-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.456-0400 m31100| 2015-07-09T14:15:11.456-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.480-0400 m31100| 2015-07-09T14:15:11.480-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.481-0400 m31100| 2015-07-09T14:15:11.481-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.482-0400 m31200| 2015-07-09T14:15:11.482-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.484-0400 m31200| 2015-07-09T14:15:11.484-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.486-0400 m31200| 2015-07-09T14:15:11.486-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.511-0400 m31100| 2015-07-09T14:15:11.511-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:11.518-0400 m31200| 2015-07-09T14:15:11.517-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.108-0400 m31100| 2015-07-09T14:15:12.108-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.113-0400 m31100| 2015-07-09T14:15:12.113-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.114-0400 m31100| 2015-07-09T14:15:12.113-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.116-0400 m31100| 2015-07-09T14:15:12.116-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.133-0400 m31100| 2015-07-09T14:15:12.132-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465711_47
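At this point db55.coll55 has four chunks, two per shard, and the bulk inserts above are versioned accordingly (shardVersion Timestamp 2000|3 on test-rs0, 2000|5 on test-rs1). The split keys ±4611686018427387902 in the two splitChunk requests earlier are no accident: _id is hashed, so chunk ranges live in the signed 64-bit hash space, and those values sit at approximately ±2^62, the midpoints of the two halves created by the original split at 0. A sketch of requesting such a split by hand through a mongos:

    // Split the upper half of the hashed-_id key space at ~2^62, matching the
    // logged splitKeys value. NumberLong is required: these are 64-bit hashes.
    var mongos = new Mongo('bs-osx108-8:30999');
    printjson(mongos.getDB('admin').runCommand({
        split: 'db55.coll55',
        middle: { _id: NumberLong('4611686018427387902') }
    }));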
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.140-0400 m31100| 2015-07-09T14:15:12.139-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465711_45 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.140-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.140-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.140-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.140-0400 m31100| values...., out: "tmp.mrs.coll55_1436465711_45", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:212 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 515 } }, Database: { acquireCount: { r: 27, w: 66, R: 22, W: 11 }, acquireWaitCount: { r: 1, w: 12, R: 7, W: 6 }, timeAcquiringMicros: { r: 319, w: 78803, R: 119145, W: 15894 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 714ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.143-0400 m31100| 2015-07-09T14:15:12.143-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.144-0400 m31100| 2015-07-09T14:15:12.143-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.147-0400 m31100| 2015-07-09T14:15:12.146-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.151-0400 m31100| 2015-07-09T14:15:12.151-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.160-0400 m31100| 2015-07-09T14:15:12.158-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465711_47 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.160-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.160-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.160-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.161-0400 m31100| values...., out: "tmp.mrs.coll55_1436465711_47", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:212 locks:{ Global: { acquireCount: { r: 177, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5378, W: 322 } }, Database: { acquireCount: { r: 27, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 5, w: 7, R: 7, W: 9 }, timeAcquiringMicros: { r: 22775, w: 40486, R: 5836, W: 142092 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 699ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.161-0400 m31100| 2015-07-09T14:15:12.159-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.161-0400 m31100| 2015-07-09T14:15:12.159-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.161-0400 m31100| 2015-07-09T14:15:12.160-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.162-0400 m31100| 2015-07-09T14:15:12.161-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.173-0400 m31100| 2015-07-09T14:15:12.172-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.173-0400 m31100| 2015-07-09T14:15:12.172-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.174-0400 m31102| 2015-07-09T14:15:12.173-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.174-0400 m31100| 2015-07-09T14:15:12.173-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.174-0400 m31101| 2015-07-09T14:15:12.174-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.185-0400 m31100| 2015-07-09T14:15:12.185-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.191-0400 m31100| 2015-07-09T14:15:12.191-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.191-0400 m31100| 2015-07-09T14:15:12.191-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.193-0400 m31102| 2015-07-09T14:15:12.193-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.198-0400 m31100| 2015-07-09T14:15:12.198-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.199-0400 m31101| 2015-07-09T14:15:12.199-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.214-0400 m31200| 2015-07-09T14:15:12.213-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.220-0400 m31200| 2015-07-09T14:15:12.220-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.221-0400 m31200| 2015-07-09T14:15:12.221-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.227-0400 m31200| 2015-07-09T14:15:12.226-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.231-0400 m31200| 2015-07-09T14:15:12.230-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465711_45 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.231-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.231-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.231-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.232-0400 m31200| values...., out: "tmp.mrs.coll55_1436465711_45", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:212 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 797 } }, Database: { acquireCount: { r: 27, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 1, w: 12, R: 8, W: 6 }, timeAcquiringMicros: { r: 479, w: 127916, R: 129162, W: 10508 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 800ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.232-0400 m31100| 2015-07-09T14:15:12.231-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465711_46 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.232-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.232-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.232-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.233-0400 m31100| values...., out: "tmp.mrs.coll55_1436465711_46", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:212 locks:{ Global: { acquireCount: { r: 181, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 16237, w: 12303, W: 1511 } }, Database: { acquireCount: { r: 27, w: 66, R: 25, W: 11 }, acquireWaitCount: { r: 8, w: 8, R: 8, W: 7 }, timeAcquiringMicros: { r: 28305, w: 31901, R: 36035, W: 102472 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 785ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.234-0400 m31200| 2015-07-09T14:15:12.234-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.239-0400 m31200| 2015-07-09T14:15:12.239-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.240-0400 m31200| 2015-07-09T14:15:12.240-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.241-0400 m31100| 2015-07-09T14:15:12.240-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465711_45 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.241-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.241-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.241-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.242-0400 m31100| values...., out: "tmp.mrs.coll55_1436465711_45", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:212 locks:{ Global: { acquireCount: { r: 176, w: 75, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 26117, W: 365 } }, Database: { acquireCount: { r: 27, w: 67, R: 22, W: 11 }, acquireWaitCount: { r: 8, w: 12, R: 9, W: 7 }, timeAcquiringMicros: { r: 26746, w: 43278, R: 83291, W: 49840 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 817ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.242-0400 m31200| 2015-07-09T14:15:12.242-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_137
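The mapReduce commands above (shardedFirstPass: true, out: "tmp.mrs.coll55_...") are the per-shard first phase of the workload's map-reduce: each shard maps and reduces its own chunks into a temporary collection, and the five FSM threads account for the parallel tmp.mr.coll55_NNN builders on each shard. The mapper and reducer bodies are truncated by the log; a sketch of the overall call shape, with illustrative stand-in function bodies (the workload's real ones are only partially visible):

    // Shape of the client call behind the logged first-pass commands. The
    // function bodies are assumptions; only their first lines appear in the log.
    function mapper() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, { count: 1 });   // stand-in emit
        }
    }
    function reducer(key, values) {
        var res = {};
        values.forEach(function(v) {
            Object.keys(v).forEach(function(k) { res[k] = (res[k] || 0) + v[k]; });
        });
        return res;
    }
    new Mongo('bs-osx108-8:30999').getDB('db55').coll55.mapReduce(
        mapper, reducer, { out: { reduce: 'map_reduce_reduce0' } });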
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.249-0400 m31200| 2015-07-09T14:15:12.249-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.250-0400 m31200| 2015-07-09T14:15:12.249-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465711_46 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.250-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.250-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.250-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.252-0400 m31200| values...., out: "tmp.mrs.coll55_1436465711_46", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:212 locks:{ Global: { acquireCount: { r: 181, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 6900, W: 578 } }, Database: { acquireCount: { r: 27, w: 66, R: 25, W: 11 }, acquireWaitCount: { r: 7, w: 9, R: 7, W: 9 }, timeAcquiringMicros: { r: 40300, w: 71918, R: 62507, W: 10085 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 803ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.258-0400 m31200| 2015-07-09T14:15:12.258-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.258-0400 m31200| 2015-07-09T14:15:12.258-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.259-0400 m31200| 2015-07-09T14:15:12.259-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.259-0400 m31202| 2015-07-09T14:15:12.259-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.261-0400 m31201| 2015-07-09T14:15:12.261-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.268-0400 m31200| 2015-07-09T14:15:12.267-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.273-0400 m31200| 2015-07-09T14:15:12.272-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465711_45 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.273-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.273-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.273-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.273-0400 m31200| values...., out: "tmp.mrs.coll55_1436465711_45", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:212 locks:{ Global: { acquireCount: { r: 180, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 13472, w: 149, W: 4931 } }, Database: { acquireCount: { r: 27, w: 67, R: 24, W: 11 }, acquireWaitCount: { r: 10, w: 11, R: 8, W: 7 }, timeAcquiringMicros: { r: 45427, w: 34002, R: 43720, W: 104756 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 845ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.279-0400 m31200| 2015-07-09T14:15:12.279-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.279-0400 m31200| 2015-07-09T14:15:12.279-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.280-0400 m31200| 2015-07-09T14:15:12.280-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.281-0400 m31202| 2015-07-09T14:15:12.281-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.282-0400 m31201| 2015-07-09T14:15:12.281-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.283-0400 m31200| 2015-07-09T14:15:12.282-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465711_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.288-0400 m31100| 2015-07-09T14:15:12.287-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465711_46 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.288-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.288-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.288-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.289-0400 m31100| values...., out: "tmp.mrs.coll55_1436465711_46", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:212 locks:{ Global: { acquireCount: { r: 186, w: 75, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 21387, w: 17431 } }, Database: { acquireCount: { r: 27, w: 67, R: 27, W: 11 }, acquireWaitCount: { r: 7, w: 11, R: 9, W: 5 }, timeAcquiringMicros: { r: 5749, w: 69844, R: 37546, W: 89445 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 848ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.289-0400 m31100| 2015-07-09T14:15:12.288-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.289-0400 m31200| 2015-07-09T14:15:12.289-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.289-0400 m31100| 2015-07-09T14:15:12.289-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.291-0400 m31200| 2015-07-09T14:15:12.289-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.291-0400 m31200| 2015-07-09T14:15:12.290-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.293-0400 m31200| 2015-07-09T14:15:12.291-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465711_46 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.293-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.294-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.294-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.295-0400 m31200| values...., out: "tmp.mrs.coll55_1436465711_46", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:212 locks:{ Global: { acquireCount: { r: 180, w: 75, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 22189, w: 282, W: 34 } }, Database: { acquireCount: { r: 27, w: 67, R: 24, W: 11 }, acquireWaitCount: { r: 10, w: 12, R: 8, W: 7 }, timeAcquiringMicros: { r: 6128, w: 56555, R: 34562, W: 98810 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 838ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.295-0400 m31100| 2015-07-09T14:15:12.293-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.295-0400 m31100| 2015-07-09T14:15:12.294-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.297-0400 m31200| 2015-07-09T14:15:12.296-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465711_47 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.297-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.297-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.298-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.298-0400 m31200| values...., out: "tmp.mrs.coll55_1436465711_47", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:212 locks:{ Global: { acquireCount: { r: 181, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1 }, timeAcquiringMicros: { r: 27555, w: 7616 } }, Database: { acquireCount: { r: 27, w: 66, R: 25, W: 11 }, acquireWaitCount: { r: 15, w: 11, R: 7, W: 5 }, timeAcquiringMicros: { r: 38335, w: 72395, R: 39703, W: 94190 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 835ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.299-0400 m31100| 2015-07-09T14:15:12.299-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.309-0400 m31100| 2015-07-09T14:15:12.308-0400 I SHARDING [conn50] ChunkManager: time to load chunks for db55.coll55: 0ms sequenceNumber: 3 version: 2|5||559eba2eca4787b9985d1e3c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.373-0400 m31100| 2015-07-09T14:15:12.373-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63811 #189 (112 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.375-0400 m31200| 2015-07-09T14:15:12.374-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63812 #150 (93 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.389-0400 m31100| 2015-07-09T14:15:12.389-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63813 #190 (113 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.392-0400 m31200| 2015-07-09T14:15:12.392-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63814 #151 (94 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.478-0400 m31100| 2015-07-09T14:15:12.478-0400 I COMMAND [conn182] CMD: drop db55.map_reduce_reduce1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.485-0400 m31100| 2015-07-09T14:15:12.485-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.486-0400 m31100| 2015-07-09T14:15:12.485-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.486-0400 m31100| 2015-07-09T14:15:12.485-0400 I COMMAND [conn50] CMD: drop db55.map_reduce_reduce0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.492-0400 m31100| 2015-07-09T14:15:12.492-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.493-0400 m31100| 2015-07-09T14:15:12.492-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.493-0400 m31100| 2015-07-09T14:15:12.492-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.493-0400 m31100| 2015-07-09T14:15:12.493-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.494-0400 m31100| 2015-07-09T14:15:12.493-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.495-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.495-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.495-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.495-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.495-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.498-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465711_45", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465711_45", timeMillis: 688, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|26, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465711_45", timeMillis: 831, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|93, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 57, w: 50, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 8465 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 61073 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 218ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.499-0400 m31100| 2015-07-09T14:15:12.493-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.499-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.499-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.499-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.499-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.499-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.501-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465711_45", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465711_45", timeMillis: 749, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|98, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465711_45", timeMillis: 789, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|42, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 57, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7079, W: 1035 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 2 }, timeAcquiringMicros: { w: 14498, W: 17963 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 251ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.501-0400 m31100| 2015-07-09T14:15:12.494-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.501-0400 m31100| 2015-07-09T14:15:12.494-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.501-0400 m31100| 2015-07-09T14:15:12.496-0400 I COMMAND [conn45] CMD: drop db55.map_reduce_reduce3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.502-0400 m31102| 2015-07-09T14:15:12.496-0400 I COMMAND [repl writer worker 6] CMD: drop db55.map_reduce_reduce1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.502-0400 m31101| 2015-07-09T14:15:12.497-0400 I COMMAND [repl writer worker 3] CMD: drop db55.map_reduce_reduce1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.503-0400 m31100| 2015-07-09T14:15:12.503-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.503-0400 m31100| 2015-07-09T14:15:12.503-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.503-0400 m31100| 2015-07-09T14:15:12.503-0400 I COMMAND [conn176] CMD: drop db55.map_reduce_reduce2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.508-0400 m31102| 2015-07-09T14:15:12.508-0400 I COMMAND [repl writer worker 13] CMD: drop db55.map_reduce_reduce0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.511-0400 m31101| 2015-07-09T14:15:12.511-0400 I COMMAND [repl writer worker 2] CMD: drop db55.map_reduce_reduce0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.516-0400 m31100| 2015-07-09T14:15:12.516-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.517-0400 m31100| 2015-07-09T14:15:12.516-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.519-0400 m31100| 2015-07-09T14:15:12.519-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.520-0400 m31200| 2015-07-09T14:15:12.519-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.520-0400 m31200| 2015-07-09T14:15:12.519-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.521-0400 m31100| 2015-07-09T14:15:12.521-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.522-0400 m31100| 2015-07-09T14:15:12.521-0400 I COMMAND [conn176] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.522-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.522-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.523-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.523-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.523-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.526-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465711_47", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465711_47", timeMillis: 684, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|81, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465711_47", timeMillis: 828, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|107, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 57, w: 50, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 14915, W: 7313 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { w: 1, W: 4 }, timeAcquiringMicros: { w: 14669, W: 59047 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 223ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.526-0400 m31100| 2015-07-09T14:15:12.522-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465711_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.526-0400 m31102| 2015-07-09T14:15:12.522-0400 I COMMAND [repl writer worker 0] CMD: drop db55.map_reduce_reduce3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.526-0400 m31202| 2015-07-09T14:15:12.524-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.526-0400 m31100| 2015-07-09T14:15:12.523-0400 I COMMAND [conn185] CMD: drop db55.map_reduce_reduce4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.527-0400 m31201| 2015-07-09T14:15:12.526-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.527-0400 m31101| 2015-07-09T14:15:12.527-0400 I COMMAND [repl writer worker 10] CMD: drop db55.map_reduce_reduce3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.531-0400 m31100| 2015-07-09T14:15:12.531-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.532-0400 m31100| 2015-07-09T14:15:12.531-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.532-0400 m31100| 2015-07-09T14:15:12.532-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.533-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.533-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.533-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.533-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.533-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.535-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465711_46", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465711_46", timeMillis: 752, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|107, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465711_46", timeMillis: 794, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|79, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 57, w: 50, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 39848, W: 1589 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 3 }, timeAcquiringMicros: { w: 58949, W: 17660 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 239ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.535-0400 m31100| 2015-07-09T14:15:12.532-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.538-0400 m31102| 2015-07-09T14:15:12.538-0400 I COMMAND [repl writer worker 5] CMD: drop db55.map_reduce_reduce2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.541-0400 m31200| 2015-07-09T14:15:12.540-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465711_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.542-0400 m31100| 2015-07-09T14:15:12.541-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.543-0400 m31101| 2015-07-09T14:15:12.543-0400 I COMMAND [repl writer worker 5] CMD: drop db55.map_reduce_reduce2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.545-0400 m31200| 2015-07-09T14:15:12.545-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.546-0400 m31100| 2015-07-09T14:15:12.546-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.547-0400 m31102| 2015-07-09T14:15:12.547-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.549-0400 m31100| 2015-07-09T14:15:12.546-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.549-0400 m31201| 2015-07-09T14:15:12.548-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465711_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.550-0400 m31202| 2015-07-09T14:15:12.548-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465711_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.550-0400 m31100| 2015-07-09T14:15:12.546-0400 I COMMAND [conn185] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.550-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.550-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.551-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.551-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.551-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.555-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465711_46", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465711_46", timeMillis: 713, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|96, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465711_46", timeMillis: 826, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465712000|103, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 57, w: 50, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 36247, W: 1535 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 4 }, timeAcquiringMicros: { w: 16767, W: 46537 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 251ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.555-0400 m31100| 2015-07-09T14:15:12.548-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.555-0400 m31200| 2015-07-09T14:15:12.551-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.556-0400 m31200| 2015-07-09T14:15:12.552-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.556-0400 m31102| 2015-07-09T14:15:12.554-0400 I COMMAND [repl writer worker 15] CMD: drop db55.map_reduce_reduce4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.558-0400 m31101| 2015-07-09T14:15:12.558-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465711_45
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.562-0400 m31101| 2015-07-09T14:15:12.561-0400 I COMMAND [repl writer worker 0] CMD: drop db55.map_reduce_reduce4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.569-0400 m31102| 2015-07-09T14:15:12.569-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465711_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.572-0400 m31200| 2015-07-09T14:15:12.572-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.575-0400 m31200| 2015-07-09T14:15:12.575-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.579-0400 m31101| 2015-07-09T14:15:12.579-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465711_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.592-0400 m31102| 2015-07-09T14:15:12.592-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465711_46
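The mapreduce.shardedfinish commands above are the second phase: the primary shard for db55 (test-rs0, hence m31100) gathers each shard's tmp.mrs.coll55_... output, reduces it into the final map_reduce_reduce0 through map_reduce_reduce4 collections (one per FSM thread), and the temporaries are then dropped everywhere, which is why the same drops replay on the secondaries (m31101/m31102, m31201/m31202) through the repl writer workers. The logged shardCounts and counts obey a simple invariant worth knowing when reading these entries; the arithmetic for map_reduce_reduce0, taken straight from the log:

    // The top-level counts are the per-shard counts summed: test-rs0 contributed
    // { input: 1001, emit: 1001, reduce: 80, output: 20 } and test-rs1
    // { input: 999, emit: 999, reduce: 80, output: 20 }, so the totals must be
    // { input: 2000, emit: 2000, reduce: 160, output: 40 } -- as logged.
    var shardCounts = [
        { input: 1001, emit: 1001, reduce: 80, output: 20 },  // test-rs0
        { input: 999,  emit: 999,  reduce: 80, output: 20 }   // test-rs1
    ];
    var total = { input: 0, emit: 0, reduce: 0, output: 0 };
    shardCounts.forEach(function(c) {
        Object.keys(c).forEach(function(k) { total[k] += c[k]; });
    });
    printjson(total);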
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.593-0400 m31101| 2015-07-09T14:15:12.593-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.596-0400 m31201| 2015-07-09T14:15:12.596-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.598-0400 m31202| 2015-07-09T14:15:12.597-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465711_46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.599-0400 m31200| 2015-07-09T14:15:12.599-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.633-0400 m31100| 2015-07-09T14:15:12.632-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.634-0400 m31100| 2015-07-09T14:15:12.633-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.641-0400 m31200| 2015-07-09T14:15:12.641-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:12.657-0400 m31100| 2015-07-09T14:15:12.657-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.155-0400 m31200| 2015-07-09T14:15:13.155-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.159-0400 m31200| 2015-07-09T14:15:13.159-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.159-0400 m31200| 2015-07-09T14:15:13.159-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.163-0400 m31200| 2015-07-09T14:15:13.162-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.165-0400 m31200| 2015-07-09T14:15:13.164-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465712_47 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.165-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.166-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.166-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.167-0400 m31200| values...., out: "tmp.mrs.coll55_1436465712_47", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:212 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 498 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 2, w: 16, R: 12, W: 6 }, timeAcquiringMicros: { r: 3257, w: 137126, R: 158832, W: 3821 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 621ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.181-0400 m31200| 2015-07-09T14:15:13.181-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.188-0400 m31200| 2015-07-09T14:15:13.188-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.188-0400 m31200| 2015-07-09T14:15:13.188-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.190-0400 m31200| 2015-07-09T14:15:13.189-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.195-0400 m31200| 2015-07-09T14:15:13.195-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.196-0400 m31200| 2015-07-09T14:15:13.195-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.197-0400 m31200| 2015-07-09T14:15:13.196-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.198-0400 m31200| 2015-07-09T14:15:13.197-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.198-0400 m31201| 2015-07-09T14:15:13.198-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.200-0400 m31202| 2015-07-09T14:15:13.200-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.204-0400 m31200| 2015-07-09T14:15:13.203-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465712_48 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.204-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.205-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.205-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.208-0400 m31200| values...., out: "tmp.mrs.coll55_1436465712_48", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:212 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 4634, w: 6914, W: 637 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 2, w: 12, R: 11, W: 9 }, timeAcquiringMicros: { r: 431, w: 48911, R: 56052, W: 148003 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 630ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.208-0400 m31200| 2015-07-09T14:15:13.205-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465712_48 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.208-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.208-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.209-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.209-0400 m31200| values...., out: "tmp.mrs.coll55_1436465712_48", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 168, w: 75, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7674, w: 4913, W: 715 } }, Database: { acquireCount: { r: 26, w: 67, R: 19, W: 11 }, acquireWaitCount: { r: 4, w: 9, R: 12, W: 9 }, timeAcquiringMicros: { r: 31507, w: 68048, R: 100706, W: 47922 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 662ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.219-0400 m31200| 2015-07-09T14:15:13.219-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.226-0400 m31200| 2015-07-09T14:15:13.226-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.227-0400 m31200| 2015-07-09T14:15:13.227-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.227-0400 m31200| 2015-07-09T14:15:13.227-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.232-0400 m31200| 2015-07-09T14:15:13.232-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.233-0400 m31200| 2015-07-09T14:15:13.233-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.234-0400 m31200| 2015-07-09T14:15:13.234-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.235-0400 m31200| 2015-07-09T14:15:13.234-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.235-0400 m31200| 2015-07-09T14:15:13.235-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465712_50 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.236-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.236-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.236-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.237-0400 m31200| values...., out: "tmp.mrs.coll55_1436465712_50", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:212 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 8126, w: 16473, W: 1239 } }, Database: { acquireCount: { r: 26, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 3, w: 10, R: 10, W: 6 }, timeAcquiringMicros: { r: 3960, w: 51803, R: 24205, W: 151334 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 602ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.238-0400 m31200| 2015-07-09T14:15:13.235-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465712_49 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.239-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.239-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.239-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.240-0400 m31200| values...., out: "tmp.mrs.coll55_1436465712_49", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 16017, w: 11710 } }, Database: { acquireCount: { r: 26, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 5, w: 11, R: 12, W: 6 }, timeAcquiringMicros: { r: 20861, w: 78461, R: 38585, W: 98735 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 659ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.301-0400 m31100| 2015-07-09T14:15:13.301-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.309-0400 m31100| 2015-07-09T14:15:13.309-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.309-0400 m31100| 2015-07-09T14:15:13.309-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.311-0400 m31100| 2015-07-09T14:15:13.311-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.314-0400 m31100| 2015-07-09T14:15:13.314-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465712_47 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.315-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.315-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.315-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.317-0400 m31100| values...., out: "tmp.mrs.coll55_1436465712_47", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:212 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 214 } }, Database: { acquireCount: { r: 26, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 2, w: 14, R: 13, W: 9 }, timeAcquiringMicros: { r: 2151, w: 95951, R: 150536, W: 53310 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 771ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.317-0400 m31100| 2015-07-09T14:15:13.315-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.318-0400 m31100| 2015-07-09T14:15:13.316-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.323-0400 m31100| 2015-07-09T14:15:13.323-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.324-0400 m31100| 2015-07-09T14:15:13.323-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.327-0400 m31100| 2015-07-09T14:15:13.326-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.343-0400 m31100| 2015-07-09T14:15:13.342-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465712_48 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.343-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.343-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.343-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.344-0400 m31100| values...., out: "tmp.mrs.coll55_1436465712_48", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:212 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 8410, W: 60 } }, Database: { acquireCount: { r: 26, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 4, w: 21, R: 11, W: 6 }, timeAcquiringMicros: { r: 42892, w: 204352, R: 143709, W: 17145 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 799ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.345-0400 m31100| 2015-07-09T14:15:13.344-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.373-0400 m31100| 2015-07-09T14:15:13.373-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.385-0400 m31100| 2015-07-09T14:15:13.385-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.386-0400 m31100| 2015-07-09T14:15:13.385-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.387-0400 m31100| 2015-07-09T14:15:13.386-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.389-0400 m31100| 2015-07-09T14:15:13.389-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.390-0400 m31100| 2015-07-09T14:15:13.390-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.392-0400 m31100| 2015-07-09T14:15:13.392-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.393-0400 m31101| 2015-07-09T14:15:13.392-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.395-0400 m31100| 2015-07-09T14:15:13.395-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.395-0400 m31102| 2015-07-09T14:15:13.395-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.397-0400 m31100| 2015-07-09T14:15:13.396-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465712_48 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.397-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.398-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.399-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.399-0400 m31100| values...., out: "tmp.mrs.coll55_1436465712_48", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:212 locks:{ Global: { acquireCount: { r: 178, w: 75, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 8253, w: 11975, W: 1306 } }, Database: { acquireCount: { r: 26, w: 67, R: 24, W: 11 }, acquireWaitCount: { r: 6, w: 12, R: 13, W: 9 }, timeAcquiringMicros: { r: 82898, w: 104856, R: 49997, W: 62351 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_query 823ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.399-0400 m31100| 2015-07-09T14:15:13.398-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.401-0400 m31100| 2015-07-09T14:15:13.400-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465712_49 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.401-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.402-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.402-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.403-0400 m31100| values...., out: "tmp.mrs.coll55_1436465712_49", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:212 locks:{ Global: { acquireCount: { r: 181, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 20754, w: 8294, W: 1207 } }, Database: { acquireCount: { r: 26, w: 66, R: 26, W: 11 }, acquireWaitCount: { r: 8, w: 8, R: 18, W: 9 }, timeAcquiringMicros: { r: 127442, w: 40900, R: 65481, W: 74904 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 825ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.406-0400 m31100| 2015-07-09T14:15:13.404-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.443-0400 m31100| 2015-07-09T14:15:13.443-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.451-0400 m31100| 2015-07-09T14:15:13.450-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.451-0400 m31100| 2015-07-09T14:15:13.450-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.454-0400 m31100| 2015-07-09T14:15:13.454-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.457-0400 m31100| 2015-07-09T14:15:13.456-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465712_50 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.457-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.457-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.457-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.458-0400 m31100| values...., out: "tmp.mrs.coll55_1436465712_50", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:212 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 15788, w: 16310, W: 656 } }, Database: { acquireCount: { r: 26, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 6, w: 17, R: 14, W: 8 }, timeAcquiringMicros: { r: 42049, w: 69226, R: 39833, W: 167324 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 822ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.463-0400 m31100| 2015-07-09T14:15:13.463-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.526-0400 m31100| 2015-07-09T14:15:13.526-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.530-0400 m31100| 2015-07-09T14:15:13.529-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.563-0400 m31100| 2015-07-09T14:15:13.563-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.564-0400 m31100| 2015-07-09T14:15:13.563-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.564-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.564-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.564-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.564-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.565-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.566-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465712_48", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465712_48", timeMillis: 780, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|56, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465712_48", timeMillis: 653, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|73, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 55115 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 11, W: 3 }, timeAcquiringMicros: { w: 63497, W: 2172 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 219ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.567-0400 m31100| 2015-07-09T14:15:13.564-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.567-0400 m31102| 2015-07-09T14:15:13.565-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.567-0400 m31100| 2015-07-09T14:15:13.566-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.567-0400 m31101| 2015-07-09T14:15:13.566-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.568-0400 m31200| 2015-07-09T14:15:13.567-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.569-0400 m31100| 2015-07-09T14:15:13.568-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.569-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.569-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.569-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.569-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.570-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.570-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465712_47", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465712_47", timeMillis: 766, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|45, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465712_47", timeMillis: 616, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|31, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 60238, W: 3274 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 11, W: 2 }, timeAcquiringMicros: { w: 83120, W: 3601 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 252ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.571-0400 m31100| 2015-07-09T14:15:13.569-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.571-0400 m31201| 2015-07-09T14:15:13.571-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.572-0400 m31202| 2015-07-09T14:15:13.571-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.575-0400 m31200| 2015-07-09T14:15:13.575-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.576-0400 m31200| 2015-07-09T14:15:13.576-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.576-0400 m31102| 2015-07-09T14:15:13.576-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.577-0400 m31101| 2015-07-09T14:15:13.576-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.578-0400 m31100| 2015-07-09T14:15:13.578-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.578-0400 m31102| 2015-07-09T14:15:13.578-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.581-0400 m31101| 2015-07-09T14:15:13.581-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.584-0400 m31102| 2015-07-09T14:15:13.584-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.585-0400 m31101| 2015-07-09T14:15:13.585-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.596-0400 m31201| 2015-07-09T14:15:13.596-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.598-0400 m31202| 2015-07-09T14:15:13.598-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465712_47
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.616-0400 m31200| 2015-07-09T14:15:13.615-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.619-0400 m31100| 2015-07-09T14:15:13.617-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.697-0400 m31100| 2015-07-09T14:15:13.696-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.703-0400 m31100| 2015-07-09T14:15:13.703-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.711-0400 m31101| 2015-07-09T14:15:13.709-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.711-0400 m31100| 2015-07-09T14:15:13.710-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.711-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.711-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.711-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.711-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.712-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.713-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465712_48", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465712_48", timeMillis: 811, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|99, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465712_48", timeMillis: 615, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|71, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 68468, W: 37248 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 14, W: 3 }, timeAcquiringMicros: { w: 82547, W: 8856 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 311ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.713-0400 m31100| 2015-07-09T14:15:13.710-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.713-0400 m31102| 2015-07-09T14:15:13.711-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.721-0400 m31200| 2015-07-09T14:15:13.721-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465712_48
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.741-0400 m31200| 2015-07-09T14:15:13.740-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.774-0400 m31100| 2015-07-09T14:15:13.774-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.790-0400 m31100| 2015-07-09T14:15:13.790-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.790-0400 m31100| 2015-07-09T14:15:13.790-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.793-0400 m31101| 2015-07-09T14:15:13.792-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.795-0400 m31102| 2015-07-09T14:15:13.794-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.823-0400 m31200| 2015-07-09T14:15:13.823-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465713_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.827-0400 m31200| 2015-07-09T14:15:13.826-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.827-0400 m31200| 2015-07-09T14:15:13.827-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.836-0400 m31100| 2015-07-09T14:15:13.836-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.838-0400 m31100| 2015-07-09T14:15:13.837-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.842-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.842-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.843-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.843-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.843-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.846-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465712_49", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465712_49", timeMillis: 814, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|103, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465712_49", timeMillis: 657, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|106, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 182643, W: 24295 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 14, W: 3 }, timeAcquiringMicros: { w: 88732, W: 30053 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 432ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.846-0400 m31100| 2015-07-09T14:15:13.838-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.846-0400 m31200| 2015-07-09T14:15:13.841-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.846-0400 m31100| 2015-07-09T14:15:13.843-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.850-0400 m31200| 2015-07-09T14:15:13.848-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465713_51 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.850-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.850-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.850-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.851-0400 m31200| values...., out: "tmp.mrs.coll55_1436465713_51", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 44 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 13, R: 5, W: 4 }, timeAcquiringMicros: { w: 77866, R: 11898, W: 18787 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 273ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.856-0400 m31102| 2015-07-09T14:15:13.856-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.857-0400 m31101| 2015-07-09T14:15:13.856-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.859-0400 m31200| 2015-07-09T14:15:13.858-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.859-0400 m31100| 2015-07-09T14:15:13.858-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.860-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.860-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.860-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.860-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.860-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.861-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465712_50", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465712_50", timeMillis: 816, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|136, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465712_50", timeMillis: 594, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|105, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 170948, W: 15294 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 15, W: 3 }, timeAcquiringMicros: { w: 88460, W: 15958 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 395ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.861-0400 m31100| 2015-07-09T14:15:13.859-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.865-0400 m31101| 2015-07-09T14:15:13.864-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.868-0400 m31200| 2015-07-09T14:15:13.867-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.871-0400 m31202| 2015-07-09T14:15:13.871-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.871-0400 m31102| 2015-07-09T14:15:13.871-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.872-0400 m31201| 2015-07-09T14:15:13.871-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465712_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.875-0400 m31200| 2015-07-09T14:15:13.874-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.876-0400 m31201| 2015-07-09T14:15:13.876-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.877-0400 m31202| 2015-07-09T14:15:13.877-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.879-0400 m31102| 2015-07-09T14:15:13.879-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.893-0400 m31100| 2015-07-09T14:15:13.892-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.894-0400 m31200| 2015-07-09T14:15:13.893-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.901-0400 m31100| 2015-07-09T14:15:13.900-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_159
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.929-0400 m31101| 2015-07-09T14:15:13.928-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465712_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.963-0400 m31200| 2015-07-09T14:15:13.963-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465713_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.968-0400 m31200| 2015-07-09T14:15:13.968-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.968-0400 m31200| 2015-07-09T14:15:13.968-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:13.995-0400 m31200| 2015-07-09T14:15:13.995-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.019-0400 m31200| 2015-07-09T14:15:14.017-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465713_49 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.019-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.019-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.019-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.020-0400 m31200| values...., out: "tmp.mrs.coll55_1436465713_49", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 3949, W: 47319 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 11, R: 5, W: 7 }, timeAcquiringMicros: { r: 24022, w: 58097, R: 12850, W: 64710 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 402ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.065-0400 m31200| 2015-07-09T14:15:14.065-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.079-0400 m31200| 2015-07-09T14:15:14.077-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.079-0400 m31200| 2015-07-09T14:15:14.077-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.079-0400 m31200| 2015-07-09T14:15:14.078-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.091-0400 m31200| 2015-07-09T14:15:14.089-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465713_50 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.091-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.091-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.091-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.092-0400 m31200| values...., out: "tmp.mrs.coll55_1436465713_50", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 1870, w: 23535 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 13, R: 10, W: 7 }, timeAcquiringMicros: { w: 122671, R: 18398, W: 18060 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 352ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.117-0400 m31200| 2015-07-09T14:15:14.116-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.122-0400 m31200| 2015-07-09T14:15:14.122-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.122-0400 m31200| 2015-07-09T14:15:14.122-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.123-0400 m31200| 2015-07-09T14:15:14.123-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.124-0400 m31200| 2015-07-09T14:15:14.123-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465713_52 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.124-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.124-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.124-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.127-0400 m31200| values...., out: "tmp.mrs.coll55_1436465713_52", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 7996 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 5, R: 11, W: 5 }, timeAcquiringMicros: { r: 3205, w: 11419, R: 43902, W: 11008 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 254ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.136-0400 m31200| 2015-07-09T14:15:14.135-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.143-0400 m31200| 2015-07-09T14:15:14.142-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.143-0400 m31200| 2015-07-09T14:15:14.143-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.144-0400 m31200| 2015-07-09T14:15:14.144-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.144-0400 m31200| 2015-07-09T14:15:14.144-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465713_53 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.145-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.145-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.145-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.146-0400 m31200| values...., out: "tmp.mrs.coll55_1436465713_53", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 10059, w: 9766 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 2, R: 10, W: 5 }, timeAcquiringMicros: { r: 7117, w: 25613, R: 15143, W: 27607 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 256ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.210-0400 m31100| 2015-07-09T14:15:14.210-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465713_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.215-0400 m31100| 2015-07-09T14:15:14.214-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.215-0400 m31100| 2015-07-09T14:15:14.214-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.224-0400 m31100| 2015-07-09T14:15:14.223-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.226-0400 m31100| 2015-07-09T14:15:14.226-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465713_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.226-0400 m31100| 2015-07-09T14:15:14.226-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465713_49 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.227-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.227-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.227-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.228-0400 m31100| values...., out: "tmp.mrs.coll55_1436465713_49", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 4, W: 1 }, timeAcquiringMicros: { r: 77943, w: 55498, W: 234 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { w: 15, R: 13, W: 9 }, timeAcquiringMicros: { w: 133300, R: 115471, W: 24472 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 611ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.228-0400 m31100| 2015-07-09T14:15:14.228-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_160
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.235-0400 m31100| 2015-07-09T14:15:14.234-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.236-0400 m31100| 2015-07-09T14:15:14.234-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.238-0400 m31100| 2015-07-09T14:15:14.237-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.258-0400 m31100| 2015-07-09T14:15:14.258-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465713_51 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.259-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.259-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.259-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.260-0400 m31100| values...., out: "tmp.mrs.coll55_1436465713_51", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 6, w: 1, W: 1 }, timeAcquiringMicros: { r: 116800, w: 4702, W: 1369 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 21, R: 11, W: 9 }, timeAcquiringMicros: { r: 7915, w: 282666, R: 55073, W: 27887 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 682ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.260-0400 m31100| 2015-07-09T14:15:14.259-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_161
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.301-0400 m31100| 2015-07-09T14:15:14.301-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.307-0400 m31100| 2015-07-09T14:15:14.306-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.307-0400 m31100| 2015-07-09T14:15:14.306-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.308-0400 m31100| 2015-07-09T14:15:14.308-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.311-0400 m31100| 2015-07-09T14:15:14.309-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465713_50 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.311-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.311-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.311-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.312-0400 m31100| values...., out: "tmp.mrs.coll55_1436465713_50", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 33463, w: 60167, W: 1255 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 7, w: 9, R: 12, W: 9 }, timeAcquiringMicros: { r: 58466, w: 42540, R: 90030, W: 40255 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 569ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.312-0400 m31100| 2015-07-09T14:15:14.311-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_162
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.332-0400 m31100| 2015-07-09T14:15:14.332-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.338-0400 m31100| 2015-07-09T14:15:14.338-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.339-0400 m31100| 2015-07-09T14:15:14.338-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.340-0400 m31100| 2015-07-09T14:15:14.340-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.344-0400 m31100| 2015-07-09T14:15:14.343-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465713_52 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.344-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.344-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.345-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.345-0400 m31100| values...., out: "tmp.mrs.coll55_1436465713_52", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 3086, w: 9444, W: 235 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 4, w: 12, R: 14, W: 9 }, timeAcquiringMicros: { r: 20169, w: 111680, R: 27088, W: 77410 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 474ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.346-0400 m31100| 2015-07-09T14:15:14.344-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_163
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.378-0400 m31100| 2015-07-09T14:15:14.377-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_160
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.383-0400 m31100| 2015-07-09T14:15:14.382-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_160
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.385-0400 m31100| 2015-07-09T14:15:14.385-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.385-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.385-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.386-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.386-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.386-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.387-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465713_49", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465713_49", timeMillis: 600, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|41, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465713_49", timeMillis: 353, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|156, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 19320, W: 1538 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 9, W: 4 }, timeAcquiringMicros: { w: 46151, W: 3252 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 156ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.387-0400 m31100| 2015-07-09T14:15:14.385-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465713_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.391-0400 m31101| 2015-07-09T14:15:14.390-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mr.coll55_160
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.393-0400 m31102| 2015-07-09T14:15:14.393-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mr.coll55_160
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.401-0400 m31200| 2015-07-09T14:15:14.401-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465713_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.405-0400 m31201| 2015-07-09T14:15:14.405-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465713_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.406-0400 m31202| 2015-07-09T14:15:14.405-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465713_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.437-0400 m31100| 2015-07-09T14:15:14.437-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.437-0400 m31100| 2015-07-09T14:15:14.437-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_161
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.441-0400 m31100| 2015-07-09T14:15:14.440-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_159
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.442-0400 m31100| 2015-07-09T14:15:14.441-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_159
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.444-0400 m31102| 2015-07-09T14:15:14.443-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465713_49
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.445-0400 m31100| 2015-07-09T14:15:14.444-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_161
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.445-0400 m31100| 2015-07-09T14:15:14.445-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_159
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.447-0400 m31200| 2015-07-09T14:15:14.447-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.450-0400 m31100| 2015-07-09T14:15:14.449-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465713_53 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.450-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.450-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.450-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.451-0400 m31100| values...., out: "tmp.mrs.coll55_1436465713_53", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 47404, w: 4701, W: 34926 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 8, w: 16, R: 14, W: 8 }, timeAcquiringMicros: { r: 20472, w: 121539, R: 47967, W: 39634 } }, Collection: { acquireCount: { r: 26, w: 47 }
}, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 561ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.451-0400 m31100| 2015-07-09T14:15:14.450-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_165 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.452-0400 m31100| 2015-07-09T14:15:14.451-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_164 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.453-0400 m31100| 2015-07-09T14:15:14.452-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.453-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.453-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.453-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.454-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.454-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.456-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465713_51", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465713_51", timeMillis: 659, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|44, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465713_51", timeMillis: 252, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465713000|132, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 49527, W: 193 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 7, W: 4 }, timeAcquiringMicros: { w: 40260, W: 6313 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 193ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.456-0400 m31100| 2015-07-09T14:15:14.455-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465713_51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.460-0400 m31101| 2015-07-09T14:15:14.459-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465713_49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.468-0400 m31102| 2015-07-09T14:15:14.468-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.487-0400 m31101| 2015-07-09T14:15:14.487-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mr.coll55_161 
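The mapReduce entries above truncate the workload's map, reduce, and finalize bodies at the shell's print limit. Below is a minimal sketch of functions consistent with the visible fragments ("if (this.hasOwnProperty('key') && this.has...", "var res = {};", "return reducedValue;"); it is a hedged reconstruction for readability, not the test's actual source:

    // Hypothetical reconstruction of the truncated workload functions.
    function mapper() {
        // Matches the visible guard "if (this.hasOwnProperty('key') && this.has...".
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, {count: 1});
        }
    }
    function reducer(key, values) {
        var res = {};  // matches the visible "var res = {};"
        values.forEach(function(v) {
            Object.keys(v).forEach(function(k) {
                res[k] = (res[k] || 0) + v[k];  // merge per-key partial counts
            });
        });
        return res;
    }
    function finalizer(key, reducedValue) {
        return reducedValue;  // pass-through, exactly as printed in the log
    }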
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.502-0400 m31200| 2015-07-09T14:15:14.502-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465713_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.523-0400 m31201| 2015-07-09T14:15:14.522-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465713_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.523-0400 m31202| 2015-07-09T14:15:14.523-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mrs.coll55_1436465713_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.526-0400 m31101| 2015-07-09T14:15:14.525-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465713_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.530-0400 m31102| 2015-07-09T14:15:14.530-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465713_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.536-0400 m31200| 2015-07-09T14:15:14.536-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.546-0400 m31100| 2015-07-09T14:15:14.546-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.636-0400 m31200| 2015-07-09T14:15:14.635-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.639-0400 m31200| 2015-07-09T14:15:14.638-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.639-0400 m31200| 2015-07-09T14:15:14.638-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.640-0400 m31100| 2015-07-09T14:15:14.640-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_162
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.644-0400 m31200| 2015-07-09T14:15:14.640-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.649-0400 m31100| 2015-07-09T14:15:14.648-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_162
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.649-0400 m31101| 2015-07-09T14:15:14.649-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mr.coll55_162
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.651-0400 m31200| 2015-07-09T14:15:14.650-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465714_51 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.651-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.651-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.651-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.652-0400 m31200| values...., out: "tmp.mrs.coll55_1436465714_51", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 1, R: 4, W: 2 }, timeAcquiringMicros: { w: 1449, R: 18955, W: 9800 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 203ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.652-0400 m31102| 2015-07-09T14:15:14.652-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mr.coll55_162
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.662-0400 m31100| 2015-07-09T14:15:14.661-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.663-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.663-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.663-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.663-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.663-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.665-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465713_50", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465713_50", timeMillis: 566, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|100, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465713_50", timeMillis: 339, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|21, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 77911, W: 33587 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 16, W: 4 }, timeAcquiringMicros: { w: 139474, W: 14089 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 350ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.665-0400 m31100| 2015-07-09T14:15:14.662-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.670-0400 m31200| 2015-07-09T14:15:14.669-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.672-0400 m31102| 2015-07-09T14:15:14.671-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.673-0400 m31101| 2015-07-09T14:15:14.672-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.677-0400 m31202| 2015-07-09T14:15:14.677-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.678-0400 m31201| 2015-07-09T14:15:14.677-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465713_50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.686-0400 m31200| 2015-07-09T14:15:14.685-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.698-0400 m31100| 2015-07-09T14:15:14.697-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.751-0400 m31100| 2015-07-09T14:15:14.751-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_163
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.753-0400 m31100| 2015-07-09T14:15:14.753-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_163
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.758-0400 m31102| 2015-07-09T14:15:14.757-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mr.coll55_163
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.760-0400 m31200| 2015-07-09T14:15:14.760-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.761-0400 m31101| 2015-07-09T14:15:14.761-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_163
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.764-0400 m31200| 2015-07-09T14:15:14.763-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.765-0400 m31200| 2015-07-09T14:15:14.764-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.765-0400 m31200| 2015-07-09T14:15:14.765-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.766-0400 m31200| 2015-07-09T14:15:14.765-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465714_54 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.766-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.766-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.766-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.767-0400 m31200| values...., out: "tmp.mrs.coll55_1436465714_54", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 2922, W: 3381 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 12, R: 3, W: 1 }, timeAcquiringMicros: { w: 68922, R: 3448, W: 3849 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 229ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.775-0400 m31100| 2015-07-09T14:15:14.775-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.776-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.776-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.776-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.776-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.778-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465713_52", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465713_52", timeMillis: 470, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|131, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465713_52", timeMillis: 253, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|42, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 111806, W: 7843 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 21, W: 4 }, timeAcquiringMicros: { w: 180076, W: 26198 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 431ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.778-0400 m31100| 2015-07-09T14:15:14.777-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.790-0400 m31200| 2015-07-09T14:15:14.788-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.790-0400 m31102| 2015-07-09T14:15:14.789-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.790-0400 m31101| 2015-07-09T14:15:14.790-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.792-0400 m31201| 2015-07-09T14:15:14.792-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.792-0400 m31202| 2015-07-09T14:15:14.792-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mrs.coll55_1436465713_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.807-0400 m31200| 2015-07-09T14:15:14.807-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.814-0400 m31100| 2015-07-09T14:15:14.812-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.847-0400 m31100| 2015-07-09T14:15:14.846-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_165
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.870-0400 m31100| 2015-07-09T14:15:14.869-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_165
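Each "mapReduce ... shardedFirstPass: true" entry above is one shard's first pass, which materializes its partial output in a temporary collection named tmp.mrs.<coll>_<epoch>_<n>; the "mapreduce.shardedfinish" entry then folds those per-shard results into the collection named by out: { reduce: ... }, after which the temporaries are dropped (the bursts of "CMD: drop db55.tmp.mr..." records). A shell call shaped roughly like this, issued against the mongos, produces that two-phase pattern (collection and function names are taken from the log; the test's exact options are not shown in it):

    db.coll55.mapReduce(mapper, reducer, {
        out: {reduce: "map_reduce_reduce2"},  // re-reduce into the existing output collection
        finalize: finalizer
    });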
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.870-0400 m31102| 2015-07-09T14:15:14.870-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mr.coll55_165
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.874-0400 m31101| 2015-07-09T14:15:14.874-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mr.coll55_165
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.879-0400 m31200| 2015-07-09T14:15:14.878-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465714_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.883-0400 m31100| 2015-07-09T14:15:14.882-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.883-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.883-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.883-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.884-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.884-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.886-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465713_53", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465713_53", timeMillis: 553, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|203, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465713_53", timeMillis: 255, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|63, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 77623, W: 15453 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 20, W: 4 }, timeAcquiringMicros: { w: 184703, W: 63438 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 431ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.886-0400 m31100| 2015-07-09T14:15:14.883-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.886-0400 m31200| 2015-07-09T14:15:14.884-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.886-0400 m31200| 2015-07-09T14:15:14.884-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.889-0400 m31200| 2015-07-09T14:15:14.889-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.890-0400 m31200| 2015-07-09T14:15:14.890-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_152
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.892-0400 m31101| 2015-07-09T14:15:14.891-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.898-0400 m31201| 2015-07-09T14:15:14.898-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.898-0400 m31202| 2015-07-09T14:15:14.898-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.900-0400 m31102| 2015-07-09T14:15:14.900-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465713_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.905-0400 m31200| 2015-07-09T14:15:14.904-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465714_52 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.905-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.905-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.905-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.905-0400 m31200| values...., out: "tmp.mrs.coll55_1436465714_52", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 6, R: 7, W: 7 }, timeAcquiringMicros: { w: 60513, R: 4285, W: 14680 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 219ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.911-0400 m31200| 2015-07-09T14:15:14.911-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:14.914-0400 m31100| 2015-07-09T14:15:14.914-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.016-0400 m31200| 2015-07-09T14:15:15.016-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465714_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.128-0400 m31200| 2015-07-09T14:15:15.021-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.128-0400 m31200| 2015-07-09T14:15:15.021-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.128-0400 m31200| 2015-07-09T14:15:15.022-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_153
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.128-0400 m31200| 2015-07-09T14:15:15.033-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465714_55 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.128-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.128-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.128-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.129-0400 m31200| values...., out: "tmp.mrs.coll55_1436465714_55", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5115, W: 7458 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 2, R: 7, W: 3 }, timeAcquiringMicros: { w: 8875, R: 19714, W: 11287 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 227ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.129-0400 m31200| 2015-07-09T14:15:15.062-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465714_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.129-0400 m31200| 2015-07-09T14:15:15.068-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.129-0400 m31200| 2015-07-09T14:15:15.068-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.129-0400 m31200| 2015-07-09T14:15:15.070-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.129-0400 m31200| 2015-07-09T14:15:15.070-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465714_56 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.130-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.130-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.130-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.130-0400 m31200| values...., out: "tmp.mrs.coll55_1436465714_56", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 5839 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { R: 3 }, timeAcquiringMicros: { R: 1324 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 159ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.130-0400 m31100| 2015-07-09T14:15:15.112-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.130-0400 m31100| 2015-07-09T14:15:15.118-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_164
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.130-0400 m31100| 2015-07-09T14:15:15.118-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_164
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.131-0400 m31100| 2015-07-09T14:15:15.120-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_164
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.131-0400 m31100| 2015-07-09T14:15:15.123-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465714_51 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.131-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.131-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.131-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.132-0400 m31100| values...., out: "tmp.mrs.coll55_1436465714_51", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 48179, w: 35904, W: 6468 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 32, R: 13, W: 9 }, timeAcquiringMicros: { r: 1899, w: 282602, R: 64805, W: 7985 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 676ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.132-0400 m31100| 2015-07-09T14:15:15.125-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.133-0400 m31100| 2015-07-09T14:15:15.131-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.137-0400 m31100| 2015-07-09T14:15:15.137-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.138-0400 m31100| 2015-07-09T14:15:15.137-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.158-0400 m31100| 2015-07-09T14:15:15.158-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.160-0400 m31100| 2015-07-09T14:15:15.160-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465714_54 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.161-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.161-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.161-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.161-0400 m31100| values...., out: "tmp.mrs.coll55_1436465714_54", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 6, w: 2, W: 1 }, timeAcquiringMicros: { r: 105052, w: 551, W: 7333 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 30, R: 12, W: 7 }, timeAcquiringMicros: { r: 2848, w: 199692, R: 68412, W: 39497 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 624ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.162-0400 m31100| 2015-07-09T14:15:15.160-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465714_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.162-0400 m31100| 2015-07-09T14:15:15.161-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.173-0400 m31100| 2015-07-09T14:15:15.172-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_167
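In the locks:{ ... } sections of these slow-command entries, lowercase r/w count intent-shared/intent-exclusive acquisitions and uppercase R/W count full shared/exclusive ones; acquireWaitCount and timeAcquiringMicros are reported only for acquisitions that actually blocked. For the 676ms mapReduce just above, the Database-level waits alone sum to 1899 + 282602 + 64805 + 7985 = 357291 microseconds, so roughly half of that command's latency was lock contention. A hypothetical helper (not part of the test) for totalling such waits from a parsed locks document:

    // Hypothetical helper: total all lock-wait time in a parsed "locks:" document.
    function totalWaitMicros(locks) {
        var total = 0;
        Object.keys(locks).forEach(function(resource) {
            var waits = locks[resource].timeAcquiringMicros || {};
            Object.keys(waits).forEach(function(mode) {
                total += waits[mode];  // sum across lock modes r/w/R/W
            });
        });
        return total;
    }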
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.173-0400 m31100| 2015-07-09T14:15:15.172-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.173-0400 m31100| 2015-07-09T14:15:15.173-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.195-0400 m31100| 2015-07-09T14:15:15.194-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465714_52 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.195-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.195-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.196-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.196-0400 m31100| values...., out: "tmp.mrs.coll55_1436465714_52", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 4, W: 1 }, timeAcquiringMicros: { r: 32182, w: 53995, W: 612 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 27, R: 11, W: 9 }, timeAcquiringMicros: { r: 3894, w: 129156, R: 49844, W: 61697 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 509ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.200-0400 m31100| 2015-07-09T14:15:15.199-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.228-0400 m31100| 2015-07-09T14:15:15.227-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465714_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.238-0400 m31100| 2015-07-09T14:15:15.238-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.239-0400 m31100| 2015-07-09T14:15:15.239-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.241-0400 m31100| 2015-07-09T14:15:15.240-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.243-0400 m31100| 2015-07-09T14:15:15.243-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465714_55 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.243-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.243-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.244-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.245-0400 m31100| values...., out: "tmp.mrs.coll55_1436465714_55", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 8, W: 1 }, timeAcquiringMicros: { r: 17143, w: 49733, W: 1617 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 5, w: 21, R: 12, W: 8 }, timeAcquiringMicros: { r: 42650, w: 61182, R: 48846, W: 48993 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 436ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.249-0400 m31100| 2015-07-09T14:15:15.249-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.270-0400 m31100| 2015-07-09T14:15:15.269-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465714_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.275-0400 m31100| 2015-07-09T14:15:15.275-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.276-0400 m31100| 2015-07-09T14:15:15.275-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.279-0400 m31100| 2015-07-09T14:15:15.279-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.280-0400 m31100| 2015-07-09T14:15:15.280-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465714_56 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.281-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.281-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.282-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.283-0400 m31100| values...., out: "tmp.mrs.coll55_1436465714_56", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 13099, w: 12525, W: 91 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 16, R: 12, W: 7 }, timeAcquiringMicros: { r: 16463, w: 100567, R: 27529, W: 45576 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 369ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.290-0400 m31100| 2015-07-09T14:15:15.290-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.345-0400 m31100| 2015-07-09T14:15:15.345-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.348-0400 m31100| 2015-07-09T14:15:15.348-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.350-0400 m31100| 2015-07-09T14:15:15.349-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.350-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.350-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.350-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.350-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.350-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.351-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465714_51", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465714_51", timeMillis: 671, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|50, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465714_51", timeMillis: 191, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|88, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 6 }, timeAcquiringMicros: { w: 40480 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 11, W: 4 }, timeAcquiringMicros: { w: 73480, W: 2420 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 223ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.351-0400 m31100| 2015-07-09T14:15:15.350-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.353-0400 m31200| 2015-07-09T14:15:15.353-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.353-0400 m31101| 2015-07-09T14:15:15.353-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.354-0400 m31102| 2015-07-09T14:15:15.354-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.358-0400 m31200| 2015-07-09T14:15:15.358-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.359-0400 m31101| 2015-07-09T14:15:15.358-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.359-0400 m31202| 2015-07-09T14:15:15.358-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.359-0400 m31100| 2015-07-09T14:15:15.359-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_175
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.362-0400 m31102| 2015-07-09T14:15:15.362-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.393-0400 m31201| 2015-07-09T14:15:15.392-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465714_51
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.395-0400 m31100| 2015-07-09T14:15:15.395-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.429-0400 m31100| 2015-07-09T14:15:15.428-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.433-0400 m31100| 2015-07-09T14:15:15.432-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.437-0400 m31100| 2015-07-09T14:15:15.437-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.438-0400 m31100| 2015-07-09T14:15:15.437-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.438-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.439-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.439-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.439-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.439-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.440-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465714_52", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465714_52", timeMillis: 487, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|67, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465714_52", timeMillis: 199, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|134, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 42792, W: 37338 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 7, W: 4 }, timeAcquiringMicros: { w: 33918, W: 5522 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 240ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.440-0400 m31100| 2015-07-09T14:15:15.438-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465714_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.443-0400 m31100| 2015-07-09T14:15:15.443-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.443-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.444-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.444-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.444-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.444-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.445-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465714_54", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465714_54", timeMillis: 601, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|60, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465714_54", timeMillis: 228, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465714000|111, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 85737, W: 1287 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 11, W: 4 }, timeAcquiringMicros: { w: 58082, W: 12292 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 282ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.445-0400 m31100| 2015-07-09T14:15:15.443-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.446-0400 m31101| 2015-07-09T14:15:15.446-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.446-0400 m31102| 2015-07-09T14:15:15.446-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.451-0400 m31101| 2015-07-09T14:15:15.451-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.453-0400 m31102| 2015-07-09T14:15:15.453-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.459-0400 m31200| 2015-07-09T14:15:15.458-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465714_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.461-0400 m31200| 2015-07-09T14:15:15.460-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.462-0400 m31202| 2015-07-09T14:15:15.462-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465714_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.463-0400 m31102| 2015-07-09T14:15:15.462-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mrs.coll55_1436465714_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.463-0400 m31201| 2015-07-09T14:15:15.463-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465714_52
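The "repl writer worker N" lines on m31101/m31102 and m31201/m31202 are the secondaries of test-rs0 and test-rs1 applying the primaries' drops from the oplog, which is why every temp-collection drop on m31100 or m31200 reappears moments later on its two secondaries. One way to confirm such a drop reached the oplog (a sketch, run against any member of the set; the collection-name pattern is taken from the log):

    var oplog = db.getSiblingDB("local").oplog.rs;
    // Drops replicate as command entries: { op: "c", ns: "<db>.$cmd", o: { drop: "<coll>" } }.
    oplog.find({op: "c", ns: "db55.$cmd", "o.drop": {$regex: "^tmp\\.mrs\\.coll55"}})
         .sort({$natural: -1})
         .limit(5);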
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.463-0400 m31101| 2015-07-09T14:15:15.463-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465714_52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.467-0400 m31102| 2015-07-09T14:15:15.467-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.469-0400 m31201| 2015-07-09T14:15:15.468-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.469-0400 m31202| 2015-07-09T14:15:15.469-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.472-0400 m31101| 2015-07-09T14:15:15.471-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465714_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.480-0400 m31200| 2015-07-09T14:15:15.479-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.482-0400 m31100| 2015-07-09T14:15:15.481-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_176
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.492-0400 m31100| 2015-07-09T14:15:15.492-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_177
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.500-0400 m31200| 2015-07-09T14:15:15.500-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.646-0400 m31100| 2015-07-09T14:15:15.646-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.647-0400 m31200| 2015-07-09T14:15:15.647-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465715_53
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.652-0400 m31200| 2015-07-09T14:15:15.651-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.652-0400 m31200| 2015-07-09T14:15:15.651-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.660-0400 m31200| 2015-07-09T14:15:15.660-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.660-0400 m31200| 2015-07-09T14:15:15.660-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465715_53 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.661-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.661-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.661-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.662-0400 m31200| values...., out: "tmp.mrs.coll55_1436465715_53", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 16, W: 1 }, timeAcquiringMicros: { w: 126960, W: 3988 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 302ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.689-0400 m31100| 2015-07-09T14:15:15.684-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.700-0400 m31100| 2015-07-09T14:15:15.700-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.708-0400 m31100| 2015-07-09T14:15:15.706-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.709-0400 m31200| 2015-07-09T14:15:15.709-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465715_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.714-0400 m31100| 2015-07-09T14:15:15.713-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.714-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.716-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.716-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.716-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.716-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.717-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465714_55", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465714_55", timeMillis: 432, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|110, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465714_55", timeMillis: 215, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|6, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 99370, W: 78874 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 15, W: 4 }, timeAcquiringMicros: { w: 141084, W: 22781 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 464ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.718-0400 m31100| 2015-07-09T14:15:15.714-0400 I COMMAND [conn39] CMD: drop db55.tmp.mrs.coll55_1436465714_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.719-0400 m31200| 2015-07-09T14:15:15.718-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.720-0400 m31200| 2015-07-09T14:15:15.719-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.727-0400 m31200| 2015-07-09T14:15:15.726-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.729-0400 m31200| 2015-07-09T14:15:15.729-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465715_57 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.729-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.729-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.729-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.730-0400 m31200| values...., out: "tmp.mrs.coll55_1436465715_57", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3790, W: 309 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 9, W: 6 }, timeAcquiringMicros: { r: 1143, w: 4627, R: 31406, W: 3815 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 249ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.732-0400 m31100| 2015-07-09T14:15:15.731-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.732-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.732-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.732-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.732-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.733-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.734-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465714_56", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465714_56", timeMillis: 364, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|141, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465714_56", timeMillis: 157, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|27, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 147052, W: 11310 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 15, W: 2 }, timeAcquiringMicros: { w: 138939, W: 36866 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 448ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.734-0400 m31100| 2015-07-09T14:15:15.732-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465714_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.734-0400 m31200| 2015-07-09T14:15:15.732-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465715_54
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.734-0400 m31101| 2015-07-09T14:15:15.733-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.736-0400 m31102| 2015-07-09T14:15:15.734-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.739-0400 m31102| 2015-07-09T14:15:15.738-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.739-0400 m31200| 2015-07-09T14:15:15.739-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465714_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.740-0400 m31200| 2015-07-09T14:15:15.740-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.741-0400 m31200| 2015-07-09T14:15:15.740-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.742-0400 m31200| 2015-07-09T14:15:15.741-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.742-0400 m31101| 2015-07-09T14:15:15.742-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.744-0400 m31200| 2015-07-09T14:15:15.744-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465715_54 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.745-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.746-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.746-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.746-0400 m31200| values...., out: "tmp.mrs.coll55_1436465715_54", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 9282 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 4, w: 3, R: 10, W: 6 }, timeAcquiringMicros: { r: 17567, w: 23683, R: 4149, W: 22981 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 263ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.748-0400 m31202| 2015-07-09T14:15:15.747-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465714_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.750-0400 m31101| 2015-07-09T14:15:15.750-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465714_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.750-0400 m31102|
2015-07-09T14:15:15.750-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465714_55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.751-0400 m31200| 2015-07-09T14:15:15.751-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465714_56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.753-0400 m31201| 2015-07-09T14:15:15.752-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465714_55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.755-0400 m31101| 2015-07-09T14:15:15.755-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465714_56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.755-0400 m31102| 2015-07-09T14:15:15.755-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465714_56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.756-0400 m31200| 2015-07-09T14:15:15.756-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.757-0400 m31202| 2015-07-09T14:15:15.757-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465714_56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.759-0400 m31201| 2015-07-09T14:15:15.759-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mrs.coll55_1436465714_56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.760-0400 m31100| 2015-07-09T14:15:15.760-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_178 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.761-0400 m31100| 2015-07-09T14:15:15.761-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_179 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.782-0400 m31200| 2015-07-09T14:15:15.782-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.927-0400 m31200| 2015-07-09T14:15:15.927-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.936-0400 m31200| 2015-07-09T14:15:15.934-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.936-0400 m31200| 2015-07-09T14:15:15.936-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.938-0400 m31200| 2015-07-09T14:15:15.937-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.938-0400 m31200| 2015-07-09T14:15:15.938-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465715_58 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.939-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.939-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.939-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.940-0400 m31200| values...., out: "tmp.mrs.coll55_1436465715_58", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 70 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 2, R: 6, W: 5 }, timeAcquiringMicros: { w: 4717, R: 14775, W: 1028 } }, Collection: { acquireCount: { r: 26, w: 47 
} }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.958-0400 m31200| 2015-07-09T14:15:15.958-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.962-0400 m31200| 2015-07-09T14:15:15.962-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.962-0400 m31200| 2015-07-09T14:15:15.962-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.963-0400 m31200| 2015-07-09T14:15:15.963-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.963-0400 m31200| 2015-07-09T14:15:15.963-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465715_59 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.964-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.964-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.964-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:15.964-0400 m31200| values...., out: "tmp.mrs.coll55_1436465715_59", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 7479 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 7, R: 2, W: 3 }, timeAcquiringMicros: { r: 12159, w: 16695, R: 12812, W: 16533 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 206ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.337-0400 m31100| 2015-07-09T14:15:16.337-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465715_53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.343-0400 m31100| 2015-07-09T14:15:16.341-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_175 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.344-0400 m31100| 2015-07-09T14:15:16.341-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_175 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.344-0400 m31100| 2015-07-09T14:15:16.343-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_175 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.344-0400 m31100| 2015-07-09T14:15:16.343-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465715_53 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.344-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.345-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.345-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.345-0400 m31100| values...., out: "tmp.mrs.coll55_1436465715_53", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 53700, w: 69026 
} }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 25, R: 12, W: 5 }, timeAcquiringMicros: { w: 534317, R: 69269, W: 17326 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 985ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.348-0400 m31100| 2015-07-09T14:15:16.348-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.552-0400 m31100| 2015-07-09T14:15:16.551-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465715_57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.555-0400 m31100| 2015-07-09T14:15:16.555-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_177 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.556-0400 m31100| 2015-07-09T14:15:16.555-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_177 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.568-0400 m31100| 2015-07-09T14:15:16.567-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_177 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.586-0400 m31100| 2015-07-09T14:15:16.586-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465715_57 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.587-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.587-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.587-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.587-0400 m31100| values...., out: "tmp.mrs.coll55_1436465715_57", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:212 locks:{ Global: { acquireCount: { r: 187, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 85264, w: 396, W: 35 } }, Database: { acquireCount: { r: 26, w: 66, R: 29, W: 11 }, acquireWaitCount: { r: 1, w: 7, R: 28, W: 9 }, timeAcquiringMicros: { r: 19897, w: 92646, R: 138901, W: 78651 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.587-0400 m31100| 2015-07-09T14:15:16.587-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465715_54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.597-0400 m31100| 2015-07-09T14:15:16.596-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_176 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.597-0400 m31100| 2015-07-09T14:15:16.597-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_176 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.598-0400 m31100| 2015-07-09T14:15:16.597-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.599-0400 m31100| 2015-07-09T14:15:16.598-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_176 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.608-0400 m31100| 2015-07-09T14:15:16.608-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465715_54 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.608-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, 
values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.609-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.609-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.609-0400 m31100| values...., out: "tmp.mrs.coll55_1436465715_54", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:212 locks:{ Global: { acquireCount: { r: 187, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1 }, timeAcquiringMicros: { r: 79807, w: 9707 } }, Database: { acquireCount: { r: 26, w: 66, R: 29, W: 11 }, acquireWaitCount: { r: 3, w: 7, R: 28, W: 7 }, timeAcquiringMicros: { r: 17664, w: 69745, R: 170959, W: 64517 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.610-0400 m31100| 2015-07-09T14:15:16.610-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.689-0400 m31100| 2015-07-09T14:15:16.688-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.693-0400 m31100| 2015-07-09T14:15:16.693-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.694-0400 m31100| 2015-07-09T14:15:16.694-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.694-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.694-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.695-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.695-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.695-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.696-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465715_53", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465715_53", timeMillis: 984, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465716000|21, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465715_53", timeMillis: 294, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|54, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 3120, W: 2383 } }, Database: { acquireCount: { r: 4, w: 
64, W: 4 }, acquireWaitCount: { w: 15, W: 4 }, timeAcquiringMicros: { w: 175548, W: 37960 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 349ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.697-0400 m31102| 2015-07-09T14:15:16.696-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mr.coll55_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.697-0400 m31100| 2015-07-09T14:15:16.697-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465715_53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.698-0400 m31101| 2015-07-09T14:15:16.697-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mr.coll55_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.699-0400 m31200| 2015-07-09T14:15:16.699-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465715_53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.701-0400 m31201| 2015-07-09T14:15:16.701-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465715_53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.703-0400 m31102| 2015-07-09T14:15:16.703-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465715_53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.704-0400 m31200| 2015-07-09T14:15:16.703-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.705-0400 m31202| 2015-07-09T14:15:16.704-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465715_53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.705-0400 m31100| 2015-07-09T14:15:16.705-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.706-0400 m31101| 2015-07-09T14:15:16.705-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465715_53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.779-0400 m31100| 2015-07-09T14:15:16.779-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.784-0400 m31100| 2015-07-09T14:15:16.784-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_178 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.784-0400 m31100| 2015-07-09T14:15:16.784-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_178 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.793-0400 m31100| 2015-07-09T14:15:16.792-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_178 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.822-0400 m31100| 2015-07-09T14:15:16.822-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.826-0400 m31200| 2015-07-09T14:15:16.826-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465716_55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.831-0400 m31200| 2015-07-09T14:15:16.830-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.831-0400 m31200| 2015-07-09T14:15:16.831-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.832-0400 m31200| 2015-07-09T14:15:16.832-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.832-0400 m31200| 2015-07-09T14:15:16.832-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465716_55 
command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.833-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.833-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.833-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.833-0400 m31200| values...., out: "tmp.mrs.coll55_1436465716_55", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.848-0400 m31100| 2015-07-09T14:15:16.848-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.856-0400 m31100| 2015-07-09T14:15:16.855-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465715_58 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.856-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.856-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.856-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.857-0400 m31100| values...., out: "tmp.mrs.coll55_1436465715_58", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:212 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 27427, w: 49342, W: 2334 } }, Database: { acquireCount: { r: 26, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 5, w: 19, R: 27, W: 9 }, timeAcquiringMicros: { r: 33509, w: 119341, R: 110600, W: 111353 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1099ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.857-0400 m31100| 2015-07-09T14:15:16.856-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.863-0400 m31100| 2015-07-09T14:15:16.863-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.864-0400 m31100| 2015-07-09T14:15:16.864-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.865-0400 m31101| 2015-07-09T14:15:16.865-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mr.coll55_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.865-0400 m31102| 2015-07-09T14:15:16.865-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mr.coll55_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.866-0400 m31100| 2015-07-09T14:15:16.865-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.866-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function 
reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.866-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.866-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.866-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.867-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.868-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465715_57", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465715_57", timeMillis: 1074, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465716000|63, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465715_57", timeMillis: 240, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|83, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 30508, W: 28577 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 16, W: 4 }, timeAcquiringMicros: { w: 100117, W: 16422 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 277ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.869-0400 m31100| 2015-07-09T14:15:16.867-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465715_57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.869-0400 m31102| 2015-07-09T14:15:16.867-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mr.coll55_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.869-0400 m31100| 2015-07-09T14:15:16.868-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.870-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.870-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.870-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.870-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.870-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.871-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465715_54", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465715_54", timeMillis: 1117, counts: { input: 
1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465716000|78, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465715_54", timeMillis: 259, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|96, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 58490, W: 5562 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 15, W: 4 }, timeAcquiringMicros: { w: 70026, W: 35163 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 258ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.872-0400 m31101| 2015-07-09T14:15:16.869-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mr.coll55_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.872-0400 m31100| 2015-07-09T14:15:16.869-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465715_54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.872-0400 m31200| 2015-07-09T14:15:16.870-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465715_57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.872-0400 m31102| 2015-07-09T14:15:16.872-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465715_57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.874-0400 m31101| 2015-07-09T14:15:16.874-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mrs.coll55_1436465715_57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.874-0400 m31201| 2015-07-09T14:15:16.874-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465715_57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.874-0400 m31202| 2015-07-09T14:15:16.874-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465715_57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.879-0400 m31101| 2015-07-09T14:15:16.879-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465715_54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.879-0400 m31200| 2015-07-09T14:15:16.879-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465715_54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.881-0400 m31102| 2015-07-09T14:15:16.881-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465715_54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.882-0400 m31201| 2015-07-09T14:15:16.882-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465715_54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.883-0400 m31202| 2015-07-09T14:15:16.882-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mrs.coll55_1436465715_54 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.893-0400 m31200| 2015-07-09T14:15:16.893-0400 I COMMAND [conn80] CMD: drop 
db55.tmp.mr.coll55_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.893-0400 m31200| 2015-07-09T14:15:16.893-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.895-0400 m31100| 2015-07-09T14:15:16.895-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:16.902-0400 m31100| 2015-07-09T14:15:16.902-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.025-0400 m31100| 2015-07-09T14:15:17.025-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.034-0400 m31100| 2015-07-09T14:15:17.033-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_179 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.035-0400 m31100| 2015-07-09T14:15:17.034-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_179 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.061-0400 m31100| 2015-07-09T14:15:17.061-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_179 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.079-0400 m31100| 2015-07-09T14:15:17.079-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465715_59 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.079-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.080-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.080-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.080-0400 m31100| values...., out: "tmp.mrs.coll55_1436465715_59", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:19 reslen:212 locks:{ Global: { acquireCount: { r: 191, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 12747, w: 76969, W: 24040 } }, Database: { acquireCount: { r: 26, w: 66, R: 31, W: 11 }, acquireWaitCount: { r: 13, w: 27, R: 28, W: 9 }, timeAcquiringMicros: { r: 113995, w: 163648, R: 87687, W: 136429 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1321ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.084-0400 m31100| 2015-07-09T14:15:17.084-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.110-0400 m31200| 2015-07-09T14:15:17.110-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465716_60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.120-0400 m31200| 2015-07-09T14:15:17.120-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.121-0400 m31200| 2015-07-09T14:15:17.121-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.125-0400 m31200| 2015-07-09T14:15:17.125-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.127-0400 m31200| 2015-07-09T14:15:17.126-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465716_60 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.128-0400 m31200| if (this.hasOwnProperty('key') && 
this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.128-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.128-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.128-0400 m31200| values...., out: "tmp.mrs.coll55_1436465716_60", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 3, W: 4 }, timeAcquiringMicros: { r: 23289, w: 3340, R: 13432, W: 853 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 233ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.135-0400 m31200| 2015-07-09T14:15:17.134-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465716_56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.150-0400 m31200| 2015-07-09T14:15:17.147-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.150-0400 m31200| 2015-07-09T14:15:17.147-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.150-0400 m31200| 2015-07-09T14:15:17.148-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.150-0400 m31200| 2015-07-09T14:15:17.148-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465716_56 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.151-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.151-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.151-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.152-0400 m31200| values...., out: "tmp.mrs.coll55_1436465716_56", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 5, W: 5 }, timeAcquiringMicros: { w: 7988, W: 43801 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 255ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.205-0400 m31100| 2015-07-09T14:15:17.205-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.215-0400 m31100| 2015-07-09T14:15:17.215-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.217-0400 m31102| 2015-07-09T14:15:17.217-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.218-0400 m31101| 2015-07-09T14:15:17.218-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mr.coll55_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.221-0400 m31100| 2015-07-09T14:15:17.221-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.222-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.222-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.222-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.222-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.222-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.223-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465715_58", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465715_58", timeMillis: 1028, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465716000|175, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465715_58", timeMillis: 180, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|121, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 21051, W: 279 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 21, W: 4 }, timeAcquiringMicros: { w: 213800, W: 31421 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 364ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.223-0400 m31100| 2015-07-09T14:15:17.223-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.228-0400 m31200| 2015-07-09T14:15:17.228-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.230-0400 m31101| 2015-07-09T14:15:17.230-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.231-0400 m31201| 2015-07-09T14:15:17.231-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.231-0400 m31202| 2015-07-09T14:15:17.231-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.233-0400 m31102| 2015-07-09T14:15:17.233-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465715_58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.234-0400 m31200| 2015-07-09T14:15:17.234-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_163 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.238-0400 m31100| 2015-07-09T14:15:17.238-0400 I COMMAND [conn185] CMD: drop 
db55.tmp.mr.coll55_188 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.332-0400 m31100| 2015-07-09T14:15:17.331-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.339-0400 m31100| 2015-07-09T14:15:17.338-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.339-0400 m31101| 2015-07-09T14:15:17.339-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mr.coll55_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.341-0400 m31102| 2015-07-09T14:15:17.341-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mr.coll55_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.348-0400 m31100| 2015-07-09T14:15:17.348-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.349-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.349-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.349-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.349-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.349-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.351-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465715_59", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465715_59", timeMillis: 1276, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|4, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465715_59", timeMillis: 205, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465715000|142, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 34709, W: 13226 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 15, W: 3 }, timeAcquiringMicros: { w: 101820, W: 17904 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 266ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.351-0400 m31100| 2015-07-09T14:15:17.348-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.356-0400 m31200| 2015-07-09T14:15:17.356-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.360-0400 m31200| 
2015-07-09T14:15:17.360-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465717_61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.361-0400 m31102| 2015-07-09T14:15:17.361-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.362-0400 m31100| 2015-07-09T14:15:17.361-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465716_55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.364-0400 m31101| 2015-07-09T14:15:17.363-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.366-0400 m31202| 2015-07-09T14:15:17.366-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.370-0400 m31200| 2015-07-09T14:15:17.370-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_163 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.371-0400 m31200| 2015-07-09T14:15:17.371-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_163 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.372-0400 m31100| 2015-07-09T14:15:17.371-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.372-0400 m31201| 2015-07-09T14:15:17.371-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465715_59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.372-0400 m31100| 2015-07-09T14:15:17.371-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.373-0400 m31100| 2015-07-09T14:15:17.373-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.376-0400 m31100| 2015-07-09T14:15:17.376-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465716_55 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.377-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.377-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.378-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.379-0400 m31100| values...., out: "tmp.mrs.coll55_1436465716_55", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 4, W: 1 }, timeAcquiringMicros: { r: 57804, w: 75295, W: 119 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 31, R: 11, W: 9 }, timeAcquiringMicros: { r: 37419, w: 232624, R: 65566, W: 7347 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 672ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.379-0400 m31100| 2015-07-09T14:15:17.376-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_189 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.382-0400 m31200| 2015-07-09T14:15:17.382-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_163 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.382-0400 m31200| 2015-07-09T14:15:17.382-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465717_61 command: mapReduce { mapreduce: 
"coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.383-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.383-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.383-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.383-0400 m31200| values...., out: "tmp.mrs.coll55_1436465717_61", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1977 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.408-0400 m31200| 2015-07-09T14:15:17.408-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_164 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.414-0400 m31100| 2015-07-09T14:15:17.414-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_190 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.468-0400 m31100| 2015-07-09T14:15:17.467-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465716_60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.481-0400 m31100| 2015-07-09T14:15:17.479-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.481-0400 m31100| 2015-07-09T14:15:17.479-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.497-0400 m31100| 2015-07-09T14:15:17.497-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.504-0400 m31100| 2015-07-09T14:15:17.503-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465716_60 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.504-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.504-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.505-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.505-0400 m31100| values...., out: "tmp.mrs.coll55_1436465716_60", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 2, W: 1 }, timeAcquiringMicros: { r: 52038, w: 26129, W: 23584 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 12, w: 26, R: 10, W: 9 }, timeAcquiringMicros: { r: 27646, w: 144642, R: 47067, W: 75865 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 610ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.505-0400 m31100| 2015-07-09T14:15:17.504-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_191 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.518-0400 m31200| 2015-07-09T14:15:17.518-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465717_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.527-0400 
m31200| 2015-07-09T14:15:17.524-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_164 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.527-0400 m31200| 2015-07-09T14:15:17.524-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_164 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.527-0400 m31200| 2015-07-09T14:15:17.526-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_164 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.527-0400 m31200| 2015-07-09T14:15:17.527-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465717_62 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.528-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.528-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.528-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.529-0400 m31200| values...., out: "tmp.mrs.coll55_1436465717_62", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.546-0400 m31100| 2015-07-09T14:15:17.546-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465716_56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.552-0400 m31100| 2015-07-09T14:15:17.551-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.552-0400 m31100| 2015-07-09T14:15:17.552-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.555-0400 m31100| 2015-07-09T14:15:17.555-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.569-0400 m31100| 2015-07-09T14:15:17.569-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465716_56 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.569-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.570-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.570-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.570-0400 m31100| values...., out: "tmp.mrs.coll55_1436465716_56", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 7, W: 1 }, timeAcquiringMicros: { r: 45561, w: 54807, W: 296 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 7, w: 29, R: 14, W: 8 }, timeAcquiringMicros: { r: 68244, w: 164131, R: 77986, W: 64795 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 675ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.576-0400 m31100| 2015-07-09T14:15:17.576-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_192 
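The records above trace the two-phase path a sharded mapReduce takes in this workload: each shard first runs the command with shardedFirstPass: true and writes its partial output to a db55.tmp.mrs.coll55_<epoch>_<n> collection, then one shard runs mapreduce.shardedfinish to fold those partials into the map_reduce_reduceN output collection, after which the tmp.mr / tmp.mrs temporaries are dropped and the drops replicate to the secondaries (m31101/m31102, m31201/m31202). A minimal sketch of a client call of that shape follows; the real mapper and reducer bodies are truncated in the log ("this.has...", "values...."), so everything inside them below is an illustrative assumption, and only the map/reduce/finalize structure with out: { reduce: ... } is taken from the log itself.

    // Sketch only -- not the workload's actual source. Assumes documents of the
    // form { key: ..., value: ... }; the log truncates the real function bodies.
    function mapper() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, {count: 1});  // assumed emit shape
        }
    }

    function reducer(key, values) {
        var res = {count: 0};
        values.forEach(function(v) {
            res.count += v.count;  // assumed aggregation; log shows only "values...."
        });
        return res;
    }

    function finalizer(key, reducedValue) {
        return reducedValue;  // verbatim from the logged command
    }

    var res = db.coll55.mapReduce(mapper, reducer, {
        finalize: finalizer,
        out: {reduce: 'map_reduce_reduce2'}  // reduce output mode, as logged
    });
    printjson(res);

With out: { reduce: <collection> }, the finish pass re-reduces each shard's tmp.mrs partials against any documents already in the output collection, which is why the counts appear twice above: once per shard (input: 1001 and 999, reduce: 80, output: 20 each) and again in the merged totals (input: 2000, emit: 2000, reduce: 160, output: 40).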
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.605-0400 m31100| 2015-07-09T14:15:17.605-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_189
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.620-0400 m31100| 2015-07-09T14:15:17.620-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_189
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.621-0400 m31100| 2015-07-09T14:15:17.621-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.622-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.622-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.622-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.622-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.623-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.623-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465716_55", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465716_55", timeMillis: 669, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|111, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465716_55", timeMillis: 128, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465716000|23, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 37689, W: 18336 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 14, W: 4 }, timeAcquiringMicros: { w: 87830, W: 10171 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 244ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.624-0400 m31100| 2015-07-09T14:15:17.621-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465716_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.624-0400 m31102| 2015-07-09T14:15:17.622-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mr.coll55_189
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.624-0400 m31101| 2015-07-09T14:15:17.622-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_189
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.634-0400 m31200| 2015-07-09T14:15:17.634-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465716_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.639-0400 m31201| 2015-07-09T14:15:17.639-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465716_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.640-0400 m31200| 2015-07-09T14:15:17.639-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_165
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.640-0400 m31202| 2015-07-09T14:15:17.639-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465716_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.642-0400 m31100| 2015-07-09T14:15:17.641-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_193
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.649-0400 m31102| 2015-07-09T14:15:17.649-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465716_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.659-0400 m31101| 2015-07-09T14:15:17.658-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465716_55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.743-0400 m31100| 2015-07-09T14:15:17.743-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_191
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.753-0400 m31100| 2015-07-09T14:15:17.752-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_191
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.756-0400 m31101| 2015-07-09T14:15:17.755-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mr.coll55_191
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.756-0400 m31102| 2015-07-09T14:15:17.755-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mr.coll55_191
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.775-0400 m31100| 2015-07-09T14:15:17.774-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.775-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.775-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.776-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.776-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.776-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.777-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465716_60", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465716_60", timeMillis: 586, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|137, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465716_60", timeMillis: 228, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|21, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 37858, W: 27505 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 15, W: 4 }, timeAcquiringMicros: { w: 92003, W: 17519 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 269ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.778-0400 m31100| 2015-07-09T14:15:17.775-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465716_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.778-0400 m31100| 2015-07-09T14:15:17.778-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465717_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.781-0400 m31200| 2015-07-09T14:15:17.780-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.784-0400 m31100| 2015-07-09T14:15:17.784-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_188
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.785-0400 m31100| 2015-07-09T14:15:17.785-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_188
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.788-0400 m31200| 2015-07-09T14:15:17.788-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465716_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.790-0400 m31200| 2015-07-09T14:15:17.789-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_165
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.791-0400 m31200| 2015-07-09T14:15:17.790-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_165
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.796-0400 m31102| 2015-07-09T14:15:17.796-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465716_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.800-0400 m31200| 2015-07-09T14:15:17.800-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_165
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.801-0400 m31200| 2015-07-09T14:15:17.801-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465717_57 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.801-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.801-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.801-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.802-0400 m31200| values...., out: "tmp.mrs.coll55_1436465717_57", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 7517 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 161ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.802-0400 m31100| 2015-07-09T14:15:17.802-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_188
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.803-0400 m31101| 2015-07-09T14:15:17.803-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465716_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.808-0400 m31200| 2015-07-09T14:15:17.808-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.829-0400 m31100| 2015-07-09T14:15:17.828-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465717_61 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.829-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.829-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.829-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.830-0400 m31100| values...., out: "tmp.mrs.coll55_1436465717_61", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 7, w: 8, W: 1 }, timeAcquiringMicros: { r: 58054, w: 61591, W: 2660 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 25, R: 12, W: 9 }, timeAcquiringMicros: { r: 64831, w: 116860, R: 51294, W: 48448 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 594ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.836-0400 m31201| 2015-07-09T14:15:17.835-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465716_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.840-0400 m31202| 2015-07-09T14:15:17.840-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465716_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.859-0400 m31100| 2015-07-09T14:15:17.858-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_192
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.861-0400 m31100| 2015-07-09T14:15:17.861-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_192
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.864-0400 m31100| 2015-07-09T14:15:17.863-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_194
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.864-0400 m31100| 2015-07-09T14:15:17.863-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_195
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.871-0400 m31102| 2015-07-09T14:15:17.870-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mr.coll55_192
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.872-0400 m31101| 2015-07-09T14:15:17.872-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mr.coll55_192
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.882-0400 m31100| 2015-07-09T14:15:17.881-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.882-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.882-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.882-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.882-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.883-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.884-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465716_56", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465716_56", timeMillis: 658, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|168, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465716_56", timeMillis: 254, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|42, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 74823, W: 94 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 17, W: 3 }, timeAcquiringMicros: { w: 121366, W: 10510 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 310ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.885-0400 m31100| 2015-07-09T14:15:17.882-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465716_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.888-0400 m31200| 2015-07-09T14:15:17.888-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465716_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.889-0400 m31101| 2015-07-09T14:15:17.889-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465716_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.891-0400 m31102| 2015-07-09T14:15:17.891-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465716_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.893-0400 m31201| 2015-07-09T14:15:17.892-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465716_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.893-0400 m31202| 2015-07-09T14:15:17.893-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465716_56
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.905-0400 m31200| 2015-07-09T14:15:17.905-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.961-0400 m31100| 2015-07-09T14:15:17.960-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465717_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.968-0400 m31100| 2015-07-09T14:15:17.966-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_190
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.968-0400 m31100| 2015-07-09T14:15:17.966-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_190
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.970-0400 m31100| 2015-07-09T14:15:17.969-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_190
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.979-0400 m31100| 2015-07-09T14:15:17.978-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_196
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.990-0400 m31100| 2015-07-09T14:15:17.989-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465717_62 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.991-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.991-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.991-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.991-0400 m31100| values...., out: "tmp.mrs.coll55_1436465717_62", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 6, w: 4, W: 1 }, timeAcquiringMicros: { r: 74043, w: 36583, W: 12459 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 9, w: 26, R: 10, W: 6 }, timeAcquiringMicros: { r: 35148, w: 171073, R: 24424, W: 42659 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 582ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:17.993-0400 m31100| 2015-07-09T14:15:17.993-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.013-0400 m31200| 2015-07-09T14:15:18.012-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.020-0400 m31200| 2015-07-09T14:15:18.018-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.021-0400 m31200| 2015-07-09T14:15:18.019-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.033-0400 m31200| 2015-07-09T14:15:18.032-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.036-0400 m31200| 2015-07-09T14:15:18.036-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465717_63 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.037-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.037-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.037-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.038-0400 m31200| values...., out: "tmp.mrs.coll55_1436465717_63", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 235 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 7, R: 2, W: 3 }, timeAcquiringMicros: { w: 36997, R: 13833, W: 15169 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 227ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.070-0400 m31200| 2015-07-09T14:15:18.070-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.078-0400 m31200| 2015-07-09T14:15:18.075-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.078-0400 m31200| 2015-07-09T14:15:18.075-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.079-0400 m31200| 2015-07-09T14:15:18.079-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.080-0400 m31200| 2015-07-09T14:15:18.079-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465717_58 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.081-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.081-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.081-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.083-0400 m31200| values...., out: "tmp.mrs.coll55_1436465717_58", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 2629 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 1, R: 8 }, timeAcquiringMicros: { w: 7397, R: 5647 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 174ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.150-0400 m31100| 2015-07-09T14:15:18.150-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_195
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.165-0400 m31100| 2015-07-09T14:15:18.164-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_195
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.167-0400 m31102| 2015-07-09T14:15:18.167-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mr.coll55_195
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.168-0400 m31101| 2015-07-09T14:15:18.167-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mr.coll55_195
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.175-0400 m31100| 2015-07-09T14:15:18.175-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.175-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.176-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.176-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.176-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.176-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.178-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465717_61", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465717_61", timeMillis: 551, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|271, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465717_61", timeMillis: 136, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|66, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 98, w: 67, W: 21 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 6689, w: 14414, W: 925 } }, Database: { acquireCount: { r: 5, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 17, W: 4 }, timeAcquiringMicros: { r: 3291, w: 162059, W: 33938 } }, Collection: { acquireCount: { r: 5, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 345ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.178-0400 m31100| 2015-07-09T14:15:18.176-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465717_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.187-0400 m31200| 2015-07-09T14:15:18.187-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465717_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.190-0400 m31101| 2015-07-09T14:15:18.190-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465717_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.191-0400 m31201| 2015-07-09T14:15:18.191-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mrs.coll55_1436465717_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.192-0400 m31102| 2015-07-09T14:15:18.191-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mrs.coll55_1436465717_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.192-0400 m31202| 2015-07-09T14:15:18.192-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465717_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.196-0400 m31200| 2015-07-09T14:15:18.196-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.199-0400 m31100| 2015-07-09T14:15:18.199-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.281-0400 m31100| 2015-07-09T14:15:18.280-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.286-0400 m31100| 2015-07-09T14:15:18.286-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_193
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.286-0400 m31100| 2015-07-09T14:15:18.286-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_193
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.316-0400 m31100| 2015-07-09T14:15:18.316-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.332-0400 m31200| 2015-07-09T14:15:18.332-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.333-0400 m31100| 2015-07-09T14:15:18.333-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.339-0400 m31100| 2015-07-09T14:15:18.339-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_193
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.341-0400 m31101| 2015-07-09T14:15:18.341-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mr.coll55_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.341-0400 m31102| 2015-07-09T14:15:18.341-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mr.coll55_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.347-0400 m31100| 2015-07-09T14:15:18.346-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.348-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.348-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.348-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.348-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.348-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.351-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465717_62", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465717_62", timeMillis: 559, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|323, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465717_62", timeMillis: 116, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|88, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33359, W: 22955 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 17, W: 4 }, timeAcquiringMicros: { w: 160755, W: 22796 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 355ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.351-0400 m31100| 2015-07-09T14:15:18.347-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465717_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.351-0400 m31200| 2015-07-09T14:15:18.348-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.351-0400 m31200| 2015-07-09T14:15:18.349-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.351-0400 m31200| 2015-07-09T14:15:18.349-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_168
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.359-0400 m31200| 2015-07-09T14:15:18.359-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465717_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.360-0400 m31102| 2015-07-09T14:15:18.360-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mrs.coll55_1436465717_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.368-0400 m31101| 2015-07-09T14:15:18.368-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465717_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.371-0400 m31100| 2015-07-09T14:15:18.370-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465717_57 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.371-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.371-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.371-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.372-0400 m31100| values...., out: "tmp.mrs.coll55_1436465717_57", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 5, W: 1 }, timeAcquiringMicros: { r: 50319, w: 79682, W: 7761 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 26, R: 12, W: 9 }, timeAcquiringMicros: { r: 27985, w: 256922, R: 22355, W: 38350 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 731ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.374-0400 m31200| 2015-07-09T14:15:18.373-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465718_64 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.374-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.374-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.374-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.375-0400 m31200| values...., out: "tmp.mrs.coll55_1436465718_64", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 177ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.379-0400 m31201| 2015-07-09T14:15:18.379-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465717_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.381-0400 m31200| 2015-07-09T14:15:18.381-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.401-0400 m31100| 2015-07-09T14:15:18.401-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.407-0400 m31100| 2015-07-09T14:15:18.407-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_194
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.408-0400 m31100| 2015-07-09T14:15:18.408-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_194
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.409-0400 m31202| 2015-07-09T14:15:18.408-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465717_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.411-0400 m31100| 2015-07-09T14:15:18.410-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.415-0400 m31100| 2015-07-09T14:15:18.411-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_194
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.415-0400 m31100| 2015-07-09T14:15:18.413-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.442-0400 m31100| 2015-07-09T14:15:18.441-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465717_63 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.442-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.442-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.443-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.443-0400 m31100| values...., out: "tmp.mrs.coll55_1436465717_63", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 65148, w: 36036, W: 2793 } }, Database: { acquireCount: { r: 27, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 8, w: 20, R: 12, W: 9 }, timeAcquiringMicros: { r: 35212, w: 158914, R: 51960, W: 43578 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 633ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.446-0400 m31100| 2015-07-09T14:15:18.445-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.532-0400 m31100| 2015-07-09T14:15:18.532-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.536-0400 m31200| 2015-07-09T14:15:18.536-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.538-0400 m31100| 2015-07-09T14:15:18.538-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_196
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.539-0400 m31100| 2015-07-09T14:15:18.539-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_196
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.540-0400 m31200| 2015-07-09T14:15:18.540-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.541-0400 m31200| 2015-07-09T14:15:18.540-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.544-0400 m31200| 2015-07-09T14:15:18.543-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_169
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.544-0400 m31200| 2015-07-09T14:15:18.544-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465718_65 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.545-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.545-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.545-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.545-0400 m31200| values...., out: "tmp.mrs.coll55_1436465718_65", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.550-0400 m31100| 2015-07-09T14:15:18.550-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_196
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.573-0400 m31100| 2015-07-09T14:15:18.573-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465717_58 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.574-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.574-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.574-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.575-0400 m31100| values...., out: "tmp.mrs.coll55_1436465717_58", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 52244, w: 31541, W: 9881 } }, Database: { acquireCount: { r: 27, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 20, R: 12, W: 9 }, timeAcquiringMicros: { r: 62487, w: 154637, R: 39312, W: 61048 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 667ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.577-0400 m31100| 2015-07-09T14:15:18.576-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.687-0400 m31100| 2015-07-09T14:15:18.686-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.692-0400 m31100| 2015-07-09T14:15:18.691-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.702-0400 m31100| 2015-07-09T14:15:18.701-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.702-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.703-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.703-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.703-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.703-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.705-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465717_57", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465717_57", timeMillis: 647, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465718000|83, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465717_57", timeMillis: 151, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465717000|111, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 98, w: 67, W: 21 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 5661, w: 11749, W: 3454 } }, Database: { acquireCount: { r: 5, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 15, W: 4 }, timeAcquiringMicros: { r: 446, w: 141450, W: 12832 } }, Collection: { acquireCount: { r: 5, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 330ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.705-0400 m31100| 2015-07-09T14:15:18.702-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.705-0400 m31200| 2015-07-09T14:15:18.703-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.719-0400 m31101| 2015-07-09T14:15:18.719-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mr.coll55_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.719-0400 m31202| 2015-07-09T14:15:18.719-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.720-0400 m31201| 2015-07-09T14:15:18.720-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.721-0400 m31102| 2015-07-09T14:15:18.720-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.764-0400 m31100| 2015-07-09T14:15:18.763-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.767-0400 m31102| 2015-07-09T14:15:18.767-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:18.772-0400 m31101| 2015-07-09T14:15:18.771-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465717_57
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.025-0400 m31100| 2015-07-09T14:15:19.022-0400 I COMMAND [conn50] command db55.tmp.mr.coll55_201 command: drop { drop: "tmp.mr.coll55_201" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:145 locks:{ Global: { acquireCount: { r: 95, w: 66, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 46548, W: 12436 } }, Database: { acquireCount: { r: 4, w: 64, W: 3 }, acquireWaitCount: { w: 17, W: 3 }, timeAcquiringMicros: { w: 104545, W: 32959 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_query 258ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.025-0400 m31100| 2015-07-09T14:15:19.023-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.025-0400 m31101| 2015-07-09T14:15:19.023-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mr.coll55_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.026-0400 m31100| 2015-07-09T14:15:19.024-0400 I COMMAND [conn182] command db55.$cmd command: listCollections { listCollections: 1.0, filter: { name: "map_reduce_reduce1" } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:209 locks:{ Global: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 46852 } }, Database: { acquireCount: { R: 1 }, acquireWaitCount: { R: 1 }, timeAcquiringMicros: { R: 257225 } } } protocol:op_command 305ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.026-0400 m31100| 2015-07-09T14:15:19.025-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.026-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.027-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.027-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.027-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.027-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.029-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465717_63", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465717_63", timeMillis: 600, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465718000|128, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465717_63", timeMillis: 211, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465718000|16, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 46548, W: 12436 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 17, W: 4 }, timeAcquiringMicros: { w: 104545, W: 34700 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 579ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.029-0400 m31100| 2015-07-09T14:15:19.025-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.030-0400 m31200| 2015-07-09T14:15:19.027-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.045-0400 m31200| 2015-07-09T14:15:19.045-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.046-0400 m31101| 2015-07-09T14:15:19.045-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.048-0400 m31100| 2015-07-09T14:15:19.047-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.057-0400 m31202| 2015-07-09T14:15:19.056-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.057-0400 m31201| 2015-07-09T14:15:19.057-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.089-0400 m31102| 2015-07-09T14:15:19.089-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mr.coll55_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.096-0400 m31200| 2015-07-09T14:15:19.096-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.130-0400 m31100| 2015-07-09T14:15:19.129-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.142-0400 m31100| 2015-07-09T14:15:19.141-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.142-0400 m31100| 2015-07-09T14:15:19.141-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.143-0400 m31100| 2015-07-09T14:15:19.143-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.146-0400 m31100| 2015-07-09T14:15:19.146-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.150-0400 m31102| 2015-07-09T14:15:19.149-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465717_63
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.154-0400 m31100| 2015-07-09T14:15:19.153-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465718_64 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.154-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.154-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.154-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.155-0400 m31100| values...., out: "tmp.mrs.coll55_1436465718_64", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 3, W: 1 }, timeAcquiringMicros: { r: 72562, w: 68493, W: 46573 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 28, R: 11, W: 7 }, timeAcquiringMicros: { r: 293316, w: 194398, R: 31253, W: 33473 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 957ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.155-0400 m31100| 2015-07-09T14:15:19.153-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.271-0400 m31100| 2015-07-09T14:15:19.269-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.274-0400 m31100| 2015-07-09T14:15:19.274-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.283-0400 m31100| 2015-07-09T14:15:19.283-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.283-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.284-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.284-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.284-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.285-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.287-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465717_58", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465717_58", timeMillis: 633, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465718000|153, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465717_58", timeMillis: 170, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465718000|37, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 141559, W: 62994 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 16, W: 4 }, timeAcquiringMicros: { w: 357014, W: 17734 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 707ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.290-0400 m31100| 2015-07-09T14:15:19.286-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.297-0400 m31102| 2015-07-09T14:15:19.297-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mr.coll55_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.319-0400 m31200| 2015-07-09T14:15:19.318-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.331-0400 m31202| 2015-07-09T14:15:19.330-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.332-0400 m31201| 2015-07-09T14:15:19.330-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.334-0400 m31200| 2015-07-09T14:15:19.333-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.340-0400 m31102| 2015-07-09T14:15:19.339-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.347-0400 m31200| 2015-07-09T14:15:19.346-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.347-0400 m31200| 2015-07-09T14:15:19.347-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.347-0400 m31200| 2015-07-09T14:15:19.347-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.353-0400 m31100| 2015-07-09T14:15:19.352-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_206
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.382-0400 m31200| 2015-07-09T14:15:19.381-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_170
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.428-0400 m31200| 2015-07-09T14:15:19.427-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465719_59 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.428-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.428-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.428-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.430-0400 m31200| values...., out: "tmp.mrs.coll55_1436465719_59", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1557 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 6, W: 4 }, timeAcquiringMicros: { r: 9243, w: 13572, R: 73948, W: 50325 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 401ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.458-0400 m31200| 2015-07-09T14:15:19.457-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.469-0400 m31200| 2015-07-09T14:15:19.467-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.469-0400 m31200| 2015-07-09T14:15:19.467-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.469-0400 m31200| 2015-07-09T14:15:19.468-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_171
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.470-0400 m31200| 2015-07-09T14:15:19.470-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465719_66 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.471-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.471-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.471-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.472-0400 m31200| values...., out: "tmp.mrs.coll55_1436465719_66", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 12936 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 14, R: 1, W: 3 }, timeAcquiringMicros: { r: 29293, w: 85888, R: 1578, W: 10693 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 375ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.555-0400 m31101| 2015-07-09T14:15:19.554-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mr.coll55_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.568-0400 m31200| 2015-07-09T14:15:19.567-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.576-0400 m31200| 2015-07-09T14:15:19.576-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.577-0400 m31100| 2015-07-09T14:15:19.576-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.578-0400 m31200| 2015-07-09T14:15:19.576-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.578-0400 m31200| 2015-07-09T14:15:19.578-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_172
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.579-0400 m31101| 2015-07-09T14:15:19.578-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465717_58
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.579-0400 m31200| 2015-07-09T14:15:19.578-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465719_60 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.579-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.579-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.579-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.581-0400 m31200| values...., out: "tmp.mrs.coll55_1436465719_60", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 13051 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 1, R: 4, W: 5 }, timeAcquiringMicros: { r: 500, w: 3304, R: 2721, W: 2686 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 240ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.590-0400 m31100| 2015-07-09T14:15:19.589-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.599-0400 m31100| 2015-07-09T14:15:19.599-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.600-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.600-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.600-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.601-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.601-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.603-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465718_64", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465718_64", timeMillis: 945, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|17, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465718_64", timeMillis: 152, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465718000|60, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 5561 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 20, W: 4 }, timeAcquiringMicros: { w: 192468, W: 23253 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 445ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.603-0400 m31100| 2015-07-09T14:15:19.599-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.609-0400 m31200| 2015-07-09T14:15:19.608-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.613-0400 m31202| 2015-07-09T14:15:19.612-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.615-0400 m31201| 2015-07-09T14:15:19.613-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.617-0400 m31100| 2015-07-09T14:15:19.617-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.619-0400 m31200| 2015-07-09T14:15:19.618-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.627-0400 m31101| 2015-07-09T14:15:19.626-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mr.coll55_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.630-0400 m31100| 2015-07-09T14:15:19.629-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.630-0400 m31100| 2015-07-09T14:15:19.629-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.631-0400 m31101| 2015-07-09T14:15:19.630-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.631-0400 m31100| 2015-07-09T14:15:19.631-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.632-0400 m31100| 2015-07-09T14:15:19.632-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_207
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.643-0400 m31100| 2015-07-09T14:15:19.643-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465718_65 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.644-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.644-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.645-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.646-0400 m31100| values...., out: "tmp.mrs.coll55_1436465718_65", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 4, W: 1 }, timeAcquiringMicros: { r: 68257, w: 180136, W: 9099 } }, Database: { acquireCount: { r: 27, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 37, R: 12, W: 7 }, timeAcquiringMicros: { r: 69373, w: 597080, R: 45003, W: 42097 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1262ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.646-0400 m31100| 2015-07-09T14:15:19.644-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_208
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.743-0400 m31100| 2015-07-09T14:15:19.743-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.749-0400 m31100| 2015-07-09T14:15:19.749-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.750-0400 m31100| 2015-07-09T14:15:19.750-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.752-0400 m31100| 2015-07-09T14:15:19.752-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.756-0400 m31102| 2015-07-09T14:15:19.755-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mr.coll55_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.761-0400 m31102| 2015-07-09T14:15:19.760-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mrs.coll55_1436465718_64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.770-0400 m31100| 2015-07-09T14:15:19.769-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465719_59 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.770-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.770-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.770-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.771-0400 m31100| values...., out: "tmp.mrs.coll55_1436465719_59", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 3, W: 1 }, timeAcquiringMicros: { r: 129608, w: 37913, W: 79 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 8, w: 21, R: 12, W: 7 }, timeAcquiringMicros: { r: 38965, w: 192804, R: 86125, W: 28908 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 742ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.773-0400 m31100| 2015-07-09T14:15:19.773-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_209
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.786-0400 m31200| 2015-07-09T14:15:19.786-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.796-0400 m31200| 2015-07-09T14:15:19.795-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.797-0400 m31200| 2015-07-09T14:15:19.797-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.799-0400 m31200| 2015-07-09T14:15:19.798-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_173
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.799-0400 m31200| 2015-07-09T14:15:19.798-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465719_67 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.799-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.799-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.800-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.800-0400 m31200| values...., out: "tmp.mrs.coll55_1436465719_67", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 181ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.856-0400 m31100| 2015-07-09T14:15:19.856-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_208
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.862-0400 m31100| 2015-07-09T14:15:19.861-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_208
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.865-0400 m31101| 2015-07-09T14:15:19.864-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mr.coll55_208
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.870-0400 m31102| 2015-07-09T14:15:19.869-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_208
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.871-0400 m31100| 2015-07-09T14:15:19.870-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.871-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.871-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.871-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.871-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.871-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.872-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465718_65", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465718_65", timeMillis: 1248, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|110, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465718_65", timeMillis: 159, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465718000|83, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 6257, W: 1877 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 13, W: 4 }, timeAcquiringMicros: { w: 88685, W: 33647 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 226ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.873-0400 m31100| 2015-07-09T14:15:19.873-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.875-0400 m31200| 2015-07-09T14:15:19.875-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.879-0400 m31201| 2015-07-09T14:15:19.878-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.879-0400 m31202| 2015-07-09T14:15:19.879-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.883-0400 m31200| 2015-07-09T14:15:19.883-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.884-0400 m31101| 2015-07-09T14:15:19.884-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.885-0400 m31100| 2015-07-09T14:15:19.885-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_210
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.886-0400 m31102| 2015-07-09T14:15:19.886-0400 I COMMAND [repl writer worker 10] CMD: drop db55.tmp.mrs.coll55_1436465718_65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.888-0400 m31100| 2015-07-09T14:15:19.888-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.895-0400 m31100| 2015-07-09T14:15:19.894-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.895-0400 m31100| 2015-07-09T14:15:19.895-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.924-0400 m31100| 2015-07-09T14:15:19.924-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.926-0400 m31100| 2015-07-09T14:15:19.926-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465719_66 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.927-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.927-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.928-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.929-0400 m31100| values...., out: "tmp.mrs.coll55_1436465719_66", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 5, W: 1 }, timeAcquiringMicros: { r: 83763, w: 84104, W: 815 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 8, w: 27, R: 12, W: 9 }, timeAcquiringMicros: { r: 28964, w: 205514, R: 93024, W: 99593 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 834ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.929-0400 m31100| 2015-07-09T14:15:19.928-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_211
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.982-0400 m31100| 2015-07-09T14:15:19.982-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.987-0400 m31100| 2015-07-09T14:15:19.986-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_206
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.988-0400 m31100| 2015-07-09T14:15:19.987-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_206
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.988-0400 m31100| 2015-07-09T14:15:19.988-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_206
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.998-0400 m31100| 2015-07-09T14:15:19.998-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465719_60 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.998-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.998-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.999-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:19.999-0400 m31100| values...., out: "tmp.mrs.coll55_1436465719_60", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 77644, w: 7306, W: 5990 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 33, R: 12, W: 7 }, timeAcquiringMicros: { r: 27752, w: 220290, R: 41755, W: 50418 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 660ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.000-0400 m31100| 2015-07-09T14:15:20.000-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_212
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.032-0400 m31200| 2015-07-09T14:15:20.032-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.038-0400 m31200| 2015-07-09T14:15:20.037-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.038-0400 m31200| 2015-07-09T14:15:20.038-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.039-0400 m31200| 2015-07-09T14:15:20.039-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_174
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.039-0400 m31100| 2015-07-09T14:15:20.039-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_209
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.042-0400 m31200| 2015-07-09T14:15:20.042-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465719_68 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.043-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.043-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.043-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.044-0400 m31200| values...., out: "tmp.mrs.coll55_1436465719_68", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 159ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.047-0400 m31100| 2015-07-09T14:15:20.046-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_209
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.052-0400 m31100| 2015-07-09T14:15:20.052-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.052-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.053-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.053-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.053-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.053-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.054-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465719_59", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465719_59", timeMillis: 723, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|141, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465719_59", timeMillis: 319, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|25, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 48456, W: 13837 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 13, W: 4 }, timeAcquiringMicros: { w: 97455, W: 10537 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 279ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.054-0400 m31101| 2015-07-09T14:15:20.053-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mr.coll55_209
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.054-0400 m31100| 2015-07-09T14:15:20.052-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.061-0400 m31102| 2015-07-09T14:15:20.060-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mr.coll55_209
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.069-0400 m31200| 2015-07-09T14:15:20.068-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.074-0400 m31202| 2015-07-09T14:15:20.072-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.075-0400 m31201| 2015-07-09T14:15:20.072-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.082-0400 m31200| 2015-07-09T14:15:20.082-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_175
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.084-0400 m31102| 2015-07-09T14:15:20.083-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.086-0400 m31101| 2015-07-09T14:15:20.086-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465719_59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.088-0400 m31100| 2015-07-09T14:15:20.087-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_213
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.210-0400 m31100| 2015-07-09T14:15:20.210-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_211
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.213-0400 m31100| 2015-07-09T14:15:20.213-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_211
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.222-0400 m31101| 2015-07-09T14:15:20.222-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mr.coll55_211
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.223-0400 m31100| 2015-07-09T14:15:20.223-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.224-0400 m31100| 2015-07-09T14:15:20.224-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.225-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.225-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.225-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.225-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.226-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.227-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465719_66", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465719_66", timeMillis: 803, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|221, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465719_66", timeMillis: 372, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|47, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 50738, W: 12041 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 14, W: 3 }, timeAcquiringMicros: { w: 98910, W: 32047 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 295ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.227-0400 m31100| 2015-07-09T14:15:20.224-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.227-0400 m31102| 2015-07-09T14:15:20.226-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mr.coll55_211
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.228-0400 m31100| 2015-07-09T14:15:20.228-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_207
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.231-0400 m31100| 2015-07-09T14:15:20.229-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_207
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.232-0400 m31200| 2015-07-09T14:15:20.232-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.234-0400 m31100| 2015-07-09T14:15:20.234-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_207
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.236-0400 m31101| 2015-07-09T14:15:20.235-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.240-0400 m31201| 2015-07-09T14:15:20.239-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.240-0400 m31202| 2015-07-09T14:15:20.239-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.241-0400 m31102| 2015-07-09T14:15:20.240-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mrs.coll55_1436465719_66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.246-0400 m31200| 2015-07-09T14:15:20.246-0400 I COMMAND [conn41] CMD: drop db55.tmp.mrs.coll55_1436465720_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.251-0400 m31200| 2015-07-09T14:15:20.250-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_175
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.251-0400 m31200| 2015-07-09T14:15:20.250-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_175
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.252-0400 m31200| 2015-07-09T14:15:20.251-0400 I COMMAND [conn41] CMD: drop db55.tmp.mr.coll55_175
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.252-0400 m31200| 2015-07-09T14:15:20.252-0400 I COMMAND [conn41] command db55.tmp.mrs.coll55_1436465720_61 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.252-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.252-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.252-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.253-0400 m31200| values...., out: "tmp.mrs.coll55_1436465720_61", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 4054 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 170ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.265-0400 m31100| 2015-07-09T14:15:20.264-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_212
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.265-0400 m31100| 2015-07-09T14:15:20.265-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465719_67 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.266-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.266-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.266-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.266-0400 m31100| values...., out: "tmp.mrs.coll55_1436465719_67", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 6, W: 1 }, timeAcquiringMicros: { r: 45387, w: 121530, W: 1073 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 5, w: 30, R: 12, W: 9 }, timeAcquiringMicros: { r: 33073, w: 177175, R: 27472, W: 44844 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 646ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.268-0400 m31200| 2015-07-09T14:15:20.267-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_176
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.269-0400 m31100| 2015-07-09T14:15:20.269-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_215
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.274-0400 m31100| 2015-07-09T14:15:20.274-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_212
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.275-0400 m31100| 2015-07-09T14:15:20.275-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_214
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.278-0400 m31102| 2015-07-09T14:15:20.277-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mr.coll55_212
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.279-0400 m31101| 2015-07-09T14:15:20.278-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mr.coll55_212
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.281-0400 m31100| 2015-07-09T14:15:20.280-0400 I COMMAND [conn45] command db55.map_reduce_reduce3 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.281-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.281-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.281-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.281-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.281-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.284-0400 m31100| }, out: { reduce: "map_reduce_reduce3" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465719_60", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465719_60", timeMillis: 648, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|238, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465719_60", timeMillis: 238, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|68, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 80444, W: 149 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 18, W: 4 }, timeAcquiringMicros: { w: 91115, W: 20110 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 280ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.284-0400 m31100| 2015-07-09T14:15:20.282-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.289-0400 m31200| 2015-07-09T14:15:20.289-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.299-0400 m31101| 2015-07-09T14:15:20.299-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.302-0400 m31102| 2015-07-09T14:15:20.302-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.308-0400 m31200| 2015-07-09T14:15:20.308-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_177
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.311-0400 m31202| 2015-07-09T14:15:20.310-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.315-0400 m31201| 2015-07-09T14:15:20.313-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465719_60
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.330-0400 m31100| 2015-07-09T14:15:20.330-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_216
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.444-0400 m31200| 2015-07-09T14:15:20.444-0400 I COMMAND [conn80] CMD: drop db55.tmp.mrs.coll55_1436465720_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.450-0400 m31200| 2015-07-09T14:15:20.449-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_176
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.450-0400 m31200| 2015-07-09T14:15:20.450-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_176
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.451-0400 m31200| 2015-07-09T14:15:20.450-0400 I COMMAND [conn80] CMD: drop db55.tmp.mr.coll55_176
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.451-0400 m31200| 2015-07-09T14:15:20.450-0400 I COMMAND [conn80] command db55.tmp.mrs.coll55_1436465720_69 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.451-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.451-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.451-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.452-0400 m31200| values...., out: "tmp.mrs.coll55_1436465720_69", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 4 }, timeAcquiringMicros: { r: 3438, w: 5798, R: 35339 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 183ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.465-0400 m31100| 2015-07-09T14:15:20.463-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.468-0400 m31100| 2015-07-09T14:15:20.467-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_210
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.468-0400 m31100| 2015-07-09T14:15:20.468-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_210
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.475-0400 m31200| 2015-07-09T14:15:20.474-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465720_62
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.476-0400 m31100| 2015-07-09T14:15:20.476-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_210
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.481-0400 m31200| 2015-07-09T14:15:20.481-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_177
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.482-0400 m31200| 2015-07-09T14:15:20.481-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_177
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.482-0400 m31200| 2015-07-09T14:15:20.482-0400 I COMMAND [conn32] CMD: drop db55.tmp.mr.coll55_177
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.483-0400 m31200| 2015-07-09T14:15:20.482-0400 I COMMAND [conn32] command db55.tmp.mrs.coll55_1436465720_62 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.483-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.483-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.483-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.484-0400 m31200| values...., out: "tmp.mrs.coll55_1436465720_62", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4478, W: 73 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 2, R: 1, W: 2 }, timeAcquiringMicros: { w: 9651, R: 243, W: 11235 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 174ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.500-0400 m31100| 2015-07-09T14:15:20.500-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465719_68 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.501-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.501-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.501-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.501-0400 m31100| values...., out: "tmp.mrs.coll55_1436465719_68", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 5, W: 1 }, timeAcquiringMicros: { r: 35289, w: 82283, W: 1103 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 11, w: 25, R: 12, W: 9 }, timeAcquiringMicros: { r: 47288, w: 185503, R: 45678, W: 39481 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 617ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.502-0400 m31100| 2015-07-09T14:15:20.502-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_217
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.582-0400 m31100| 2015-07-09T14:15:20.581-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_215
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.584-0400 m31100| 2015-07-09T14:15:20.584-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_215
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.591-0400 m31101| 2015-07-09T14:15:20.591-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mr.coll55_215
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.591-0400 m31102| 2015-07-09T14:15:20.591-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mr.coll55_215
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.593-0400 m31100| 2015-07-09T14:15:20.592-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.593-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.593-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.593-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.593-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.594-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.595-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465719_67", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465719_67", timeMillis: 610, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|100, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465719_67", timeMillis: 179, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465719000|91, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 3837, W: 136 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 20, W: 4 }, timeAcquiringMicros: { w: 178349, W: 42083 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 323ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.596-0400 m31100| 2015-07-09T14:15:20.594-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.598-0400 m31200| 2015-07-09T14:15:20.598-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.602-0400 m31100| 2015-07-09T14:15:20.602-0400 I COMMAND [conn182] CMD: drop db55.tmp.mrs.coll55_1436465720_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.603-0400 m31102| 2015-07-09T14:15:20.603-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.604-0400 m31101| 2015-07-09T14:15:20.604-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.606-0400 m31201| 2015-07-09T14:15:20.606-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.607-0400 m31202| 2015-07-09T14:15:20.607-0400 I COMMAND [repl writer worker 9] CMD: drop db55.tmp.mrs.coll55_1436465719_67
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.608-0400 m31100| 2015-07-09T14:15:20.607-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_213
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.611-0400 m31100| 2015-07-09T14:15:20.608-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_213
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.611-0400 m31200| 2015-07-09T14:15:20.611-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_178
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.612-0400 m31100| 2015-07-09T14:15:20.612-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_213
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.617-0400 m31100| 2015-07-09T14:15:20.616-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_218
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.620-0400 m31100| 2015-07-09T14:15:20.619-0400 I COMMAND [conn182] command db55.tmp.mrs.coll55_1436465720_61 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.621-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.621-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.621-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.622-0400 m31100| values...., out: "tmp.mrs.coll55_1436465720_61", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 54092, w: 42997, W: 1219 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 22, R: 12, W: 9 }, timeAcquiringMicros: { r: 17174, w: 179770, R: 47770, W: 26244 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 537ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.622-0400 m31100| 2015-07-09T14:15:20.620-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.741-0400 m31200| 2015-07-09T14:15:20.740-0400 I COMMAND [conn37] CMD: drop db55.tmp.mrs.coll55_1436465720_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.746-0400 m31200| 2015-07-09T14:15:20.746-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_178
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.747-0400 m31200| 2015-07-09T14:15:20.746-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_178
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.748-0400 m31200| 2015-07-09T14:15:20.747-0400 I COMMAND [conn37] CMD: drop db55.tmp.mr.coll55_178
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.748-0400 m31200| 2015-07-09T14:15:20.748-0400 I COMMAND [conn37] command db55.tmp.mrs.coll55_1436465720_70 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.748-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.748-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.748-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.748-0400 m31200| values...., out: "tmp.mrs.coll55_1436465720_70", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 137ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.750-0400 m31100| 2015-07-09T14:15:20.749-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_217
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.758-0400 m31100| 2015-07-09T14:15:20.757-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_217
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.759-0400 m31101| 2015-07-09T14:15:20.759-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mr.coll55_217
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.763-0400 m31100| 2015-07-09T14:15:20.763-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.764-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.764-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.764-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.764-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.764-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.765-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465719_68", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465719_68", timeMillis: 585, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|160, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465719_68", timeMillis: 155, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|21, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 45243, W: 1285 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 15, W: 3 }, timeAcquiringMicros: { w: 125509, W: 6184 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 261ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.765-0400 m31102| 2015-07-09T14:15:20.763-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mr.coll55_217
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.765-0400 m31100| 2015-07-09T14:15:20.763-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.770-0400 m31200| 2015-07-09T14:15:20.769-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.772-0400 m31101| 2015-07-09T14:15:20.772-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.773-0400 m31202| 2015-07-09T14:15:20.773-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.773-0400 m31201| 2015-07-09T14:15:20.773-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.773-0400 m31102| 2015-07-09T14:15:20.773-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465719_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.778-0400 m31200| 2015-07-09T14:15:20.778-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_179
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.779-0400 m31100| 2015-07-09T14:15:20.778-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_220
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.873-0400 m31100| 2015-07-09T14:15:20.872-0400 I COMMAND [conn50] CMD: drop db55.tmp.mrs.coll55_1436465720_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.875-0400 m31100| 2015-07-09T14:15:20.875-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_214
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.876-0400 m31100| 2015-07-09T14:15:20.875-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_214
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.894-0400 m31200| 2015-07-09T14:15:20.894-0400 I COMMAND [conn52] CMD: drop db55.tmp.mrs.coll55_1436465720_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.899-0400 m31200| 2015-07-09T14:15:20.898-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_179
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.899-0400 m31200| 2015-07-09T14:15:20.898-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_179
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.899-0400 m31200| 2015-07-09T14:15:20.899-0400 I COMMAND [conn52] CMD: drop db55.tmp.mr.coll55_179
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.899-0400 m31200| 2015-07-09T14:15:20.899-0400 I COMMAND [conn52] command db55.tmp.mrs.coll55_1436465720_71 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.899-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.899-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.900-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.900-0400 m31200| values...., out: "tmp.mrs.coll55_1436465720_71", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 121ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.908-0400 m31100| 2015-07-09T14:15:20.908-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.909-0400 m31100| 2015-07-09T14:15:20.909-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_214
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.919-0400 m31100| 2015-07-09T14:15:20.919-0400 I COMMAND [conn182] CMD: drop db55.tmp.mr.coll55_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.924-0400 m31102| 2015-07-09T14:15:20.924-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mr.coll55_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.925-0400 m31100| 2015-07-09T14:15:20.924-0400 I COMMAND [conn182] command db55.map_reduce_reduce1 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.925-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.925-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.925-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.925-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.925-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.927-0400 m31100| }, out: { reduce: "map_reduce_reduce1" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465720_61", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465720_61", timeMillis: 526, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|221, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465720_61", timeMillis: 169, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|45, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 25133, W: 16902 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 17, W: 4 }, timeAcquiringMicros: { w: 154469, W: 11690 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 303ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.927-0400 m31100| 2015-07-09T14:15:20.925-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465720_61
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.927-0400 m31101| 2015-07-09T14:15:20.925-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mr.coll55_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.929-0400 m31100| 2015-07-09T14:15:20.928-0400 I COMMAND [conn50] command db55.tmp.mrs.coll55_1436465720_69 command: mapReduce { mapreduce: "coll55", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.929-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.929-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.929-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.930-0400 m31100| values...., out: "tmp.mrs.coll55_1436465720_69", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 33605, w: 64809, W: 6202 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 8, w: 35, R: 13, W: 7 }, timeAcquiringMicros: { r: 36774, w: 188289, R: 64462, W: 34984 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 659ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.931-0400 m31100| 2015-07-09T14:15:20.930-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_221
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.931-0400 m31200| 2015-07-09T14:15:20.930-0400 I COMMAND [conn18] CMD: drop
db55.tmp.mrs.coll55_1436465720_61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.939-0400 m31202| 2015-07-09T14:15:20.934-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465720_61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.939-0400 m31201| 2015-07-09T14:15:20.937-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mrs.coll55_1436465720_61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.939-0400 m31100| 2015-07-09T14:15:20.937-0400 I COMMAND [conn45] CMD: drop db55.tmp.mrs.coll55_1436465720_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.940-0400 m30999| 2015-07-09T14:15:20.938-0400 I NETWORK [conn356] end connection 127.0.0.1:63810 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.940-0400 m31101| 2015-07-09T14:15:20.939-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465720_61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.945-0400 m31102| 2015-07-09T14:15:20.944-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465720_61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.946-0400 m31100| 2015-07-09T14:15:20.946-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_216 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.947-0400 m31100| 2015-07-09T14:15:20.946-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_216 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.947-0400 m31100| 2015-07-09T14:15:20.947-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_216 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.977-0400 m31100| 2015-07-09T14:15:20.976-0400 I COMMAND [conn45] command db55.tmp.mrs.coll55_1436465720_62 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.977-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.978-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.978-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.979-0400 m31100| values...., out: "tmp.mrs.coll55_1436465720_62", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 3, W: 1 }, timeAcquiringMicros: { r: 22994, w: 71991, W: 6676 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 9, w: 37, R: 13, W: 9 }, timeAcquiringMicros: { r: 51673, w: 229610, R: 29222, W: 56508 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 667ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:20.979-0400 m31100| 2015-07-09T14:15:20.979-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_222 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.067-0400 m31100| 2015-07-09T14:15:21.066-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_222 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.070-0400 m31100| 2015-07-09T14:15:21.070-0400 I COMMAND [conn45] CMD: drop db55.tmp.mr.coll55_222 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.072-0400 m31100| 2015-07-09T14:15:21.072-0400 I COMMAND [conn15] CMD: drop db55.tmp.mrs.coll55_1436465720_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.073-0400 
m31102| 2015-07-09T14:15:21.073-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mr.coll55_222 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.106-0400 m31100| 2015-07-09T14:15:21.106-0400 I COMMAND [conn185] CMD: drop db55.tmp.mrs.coll55_1436465720_70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.107-0400 m31100| 2015-07-09T14:15:21.106-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_221 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.110-0400 m31100| 2015-07-09T14:15:21.110-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.111-0400 m31100| 2015-07-09T14:15:21.110-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.113-0400 m31101| 2015-07-09T14:15:21.113-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mr.coll55_222 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.115-0400 m31200| 2015-07-09T14:15:21.115-0400 I COMMAND [conn18] CMD: drop db55.tmp.mrs.coll55_1436465720_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.120-0400 m31201| 2015-07-09T14:15:21.119-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465720_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.122-0400 m31202| 2015-07-09T14:15:21.121-0400 I COMMAND [repl writer worker 0] CMD: drop db55.tmp.mrs.coll55_1436465720_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.124-0400 m30999| 2015-07-09T14:15:21.123-0400 I NETWORK [conn355] end connection 127.0.0.1:63808 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.126-0400 m31100| 2015-07-09T14:15:21.126-0400 I COMMAND [conn50] CMD: drop db55.tmp.mr.coll55_221 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.127-0400 m31100| 2015-07-09T14:15:21.126-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.128-0400 m31100| 2015-07-09T14:15:21.127-0400 I COMMAND [conn50] command db55.map_reduce_reduce0 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.128-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.128-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.128-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.129-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.129-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.132-0400 m31100| }, out: { reduce: "map_reduce_reduce0" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465720_69", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465720_69", timeMillis: 607, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|317, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465720_69", timeMillis: 182, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|69, electionId: ObjectId('559eb5910000000000000000') } 
} }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 40166, W: 3770 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 8, W: 4 }, timeAcquiringMicros: { w: 41243, W: 12978 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.132-0400 m31102| 2015-07-09T14:15:21.127-0400 I COMMAND [repl writer worker 4] CMD: drop db55.tmp.mrs.coll55_1436465720_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.132-0400 m31100| 2015-07-09T14:15:21.128-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465720_69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.132-0400 m31100| 2015-07-09T14:15:21.129-0400 I COMMAND [conn185] command db55.tmp.mrs.coll55_1436465720_70 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.132-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.133-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.133-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.134-0400 m31100| values...., out: "tmp.mrs.coll55_1436465720_70", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 50641, w: 51503, W: 39692 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 14, R: 12, W: 9 }, timeAcquiringMicros: { r: 2056, w: 107086, R: 58787, W: 27984 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 519ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.134-0400 m31102| 2015-07-09T14:15:21.133-0400 I COMMAND [repl writer worker 6] CMD: drop db55.tmp.mr.coll55_221 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.134-0400 m31100| 2015-07-09T14:15:21.134-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_223 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.134-0400 m31100| 2015-07-09T14:15:21.134-0400 I COMMAND [conn176] CMD: drop db55.tmp.mrs.coll55_1436465720_71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.135-0400 m31200| 2015-07-09T14:15:21.135-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465720_69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.140-0400 m31202| 2015-07-09T14:15:21.140-0400 I COMMAND [repl writer worker 7] CMD: drop db55.tmp.mrs.coll55_1436465720_69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.144-0400 m31201| 2015-07-09T14:15:21.140-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465720_69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.145-0400 m30998| 
2015-07-09T14:15:21.145-0400 I NETWORK [conn354] end connection 127.0.0.1:63806 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.145-0400 m31100| 2015-07-09T14:15:21.145-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_220 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.148-0400 m31100| 2015-07-09T14:15:21.145-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_220 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.148-0400 m31100| 2015-07-09T14:15:21.147-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_220 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.149-0400 m31101| 2015-07-09T14:15:21.149-0400 I COMMAND [repl writer worker 2] CMD: drop db55.tmp.mrs.coll55_1436465720_62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.150-0400 m31102| 2015-07-09T14:15:21.149-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465720_69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.155-0400 m31101| 2015-07-09T14:15:21.154-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mr.coll55_221 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.160-0400 m31100| 2015-07-09T14:15:21.159-0400 I COMMAND [conn176] command db55.tmp.mrs.coll55_1436465720_71 command: mapReduce { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.160-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.160-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.160-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.161-0400 m31100| values...., out: "tmp.mrs.coll55_1436465720_71", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 45864, w: 65725, W: 21 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 5, R: 12, W: 6 }, timeAcquiringMicros: { r: 13111, w: 12452, R: 49498, W: 28014 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 381ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.163-0400 m31100| 2015-07-09T14:15:21.162-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_224 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.164-0400 m31101| 2015-07-09T14:15:21.164-0400 I COMMAND [repl writer worker 15] CMD: drop db55.tmp.mrs.coll55_1436465720_69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.248-0400 m31100| 2015-07-09T14:15:21.248-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_223 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.250-0400 m31100| 2015-07-09T14:15:21.250-0400 I COMMAND [conn185] CMD: drop db55.tmp.mr.coll55_223 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.252-0400 m31100| 2015-07-09T14:15:21.250-0400 I COMMAND [conn185] command db55.map_reduce_reduce2 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.252-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.252-0400 m31100| var res = {}; 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.252-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.252-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.252-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.253-0400 m31100| }, out: { reduce: "map_reduce_reduce2" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465720_70", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465720_70", timeMillis: 500, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465721000|117, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465720_70", timeMillis: 135, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|113, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 11377, W: 67 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 3, W: 3 }, timeAcquiringMicros: { w: 19304, W: 699 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.253-0400 m31100| 2015-07-09T14:15:21.251-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465720_70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.255-0400 m31200| 2015-07-09T14:15:21.254-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465720_70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.258-0400 m31101| 2015-07-09T14:15:21.258-0400 I COMMAND [repl writer worker 11] CMD: drop db55.tmp.mr.coll55_223 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.258-0400 m31202| 2015-07-09T14:15:21.258-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mrs.coll55_1436465720_70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.260-0400 m31201| 2015-07-09T14:15:21.259-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465720_70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.260-0400 m30998| 2015-07-09T14:15:21.259-0400 I NETWORK [conn355] end connection 127.0.0.1:63807 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.263-0400 m31101| 2015-07-09T14:15:21.263-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465720_70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.272-0400 m31102| 2015-07-09T14:15:21.272-0400 I COMMAND [repl writer worker 1] CMD: drop db55.tmp.mr.coll55_223 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.276-0400 m31102| 2015-07-09T14:15:21.276-0400 I COMMAND [repl writer worker 3] CMD: drop db55.tmp.mrs.coll55_1436465720_70 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:21.288-0400 m31100| 2015-07-09T14:15:21.287-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_224 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.289-0400 m31100| 2015-07-09T14:15:21.289-0400 I COMMAND [conn176] CMD: drop db55.tmp.mr.coll55_224 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.291-0400 m31100| 2015-07-09T14:15:21.289-0400 I COMMAND [conn176] command db55.map_reduce_reduce4 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll55", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.291-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.291-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.291-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.292-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.292-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.294-0400 m31100| }, out: { reduce: "map_reduce_reduce4" } }, inputDB: "db55", shardedOutputCollection: "tmp.mrs.coll55_1436465720_71", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll55_1436465720_71", timeMillis: 367, counts: { input: 1001, emit: 1001, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465721000|124, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll55_1436465720_71", timeMillis: 120, counts: { input: 999, emit: 999, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465720000|136, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1001, emit: 1001, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 999, emit: 999, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:221 locks:{ Global: { acquireCount: { r: 96, w: 67, W: 21 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 32964 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 2, W: 2 }, timeAcquiringMicros: { w: 2893, W: 3540 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.294-0400 m31100| 2015-07-09T14:15:21.290-0400 I COMMAND [conn32] CMD: drop db55.tmp.mrs.coll55_1436465720_71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.294-0400 m31200| 2015-07-09T14:15:21.294-0400 I COMMAND [conn65] CMD: drop db55.tmp.mrs.coll55_1436465720_71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.296-0400 m31101| 2015-07-09T14:15:21.296-0400 I COMMAND [repl writer worker 13] CMD: drop db55.tmp.mr.coll55_224 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.297-0400 m31202| 2015-07-09T14:15:21.297-0400 I COMMAND [repl writer worker 12] CMD: drop db55.tmp.mrs.coll55_1436465720_71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.298-0400 m31201| 2015-07-09T14:15:21.298-0400 I COMMAND [repl writer worker 9] CMD: drop 
db55.tmp.mrs.coll55_1436465720_71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.299-0400 m31102| 2015-07-09T14:15:21.299-0400 I COMMAND [repl writer worker 8] CMD: drop db55.tmp.mr.coll55_224 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.300-0400 m30998| 2015-07-09T14:15:21.299-0400 I NETWORK [conn356] end connection 127.0.0.1:63809 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.318-0400 m30999| 2015-07-09T14:15:21.318-0400 I COMMAND [conn1] DROP: db55.map_reduce_reduce0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.318-0400 m30999| 2015-07-09T14:15:21.318-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.318-0400 m31100| 2015-07-09T14:15:21.318-0400 I COMMAND [conn45] CMD: drop db55.map_reduce_reduce0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.320-0400 m31101| 2015-07-09T14:15:21.319-0400 I COMMAND [repl writer worker 14] CMD: drop db55.tmp.mrs.coll55_1436465720_71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.320-0400 m31102| 2015-07-09T14:15:21.320-0400 I COMMAND [repl writer worker 5] CMD: drop db55.tmp.mrs.coll55_1436465720_71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.320-0400 m30999| 2015-07-09T14:15:21.320-0400 I COMMAND [conn1] DROP: db55.map_reduce_reduce1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.321-0400 m30999| 2015-07-09T14:15:21.320-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.321-0400 m31100| 2015-07-09T14:15:21.320-0400 I COMMAND [conn45] CMD: drop db55.map_reduce_reduce1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.322-0400 m31102| 2015-07-09T14:15:21.322-0400 I COMMAND [repl writer worker 12] CMD: drop db55.map_reduce_reduce0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.322-0400 m31101| 2015-07-09T14:15:21.322-0400 I COMMAND [repl writer worker 7] CMD: drop db55.map_reduce_reduce0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.323-0400 m30999| 2015-07-09T14:15:21.322-0400 I COMMAND [conn1] DROP: db55.map_reduce_reduce2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.323-0400 m30999| 2015-07-09T14:15:21.322-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.323-0400 m31100| 2015-07-09T14:15:21.322-0400 I COMMAND [conn45] CMD: drop db55.map_reduce_reduce2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.325-0400 m30999| 2015-07-09T14:15:21.324-0400 I COMMAND [conn1] DROP: db55.map_reduce_reduce3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.325-0400 m30999| 2015-07-09T14:15:21.324-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.325-0400 m31100| 2015-07-09T14:15:21.324-0400 I COMMAND [conn45] CMD: drop db55.map_reduce_reduce3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.325-0400 m31102| 2015-07-09T14:15:21.325-0400 I COMMAND [repl writer worker 7] CMD: drop db55.map_reduce_reduce1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.326-0400 m31101| 2015-07-09T14:15:21.325-0400 I COMMAND [repl writer worker 1] CMD: drop db55.map_reduce_reduce1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.327-0400 m30999| 2015-07-09T14:15:21.326-0400 I COMMAND [conn1] DROP: db55.map_reduce_reduce4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.327-0400 m31101| 2015-07-09T14:15:21.326-0400 I COMMAND [repl writer 
worker 15] CMD: drop db55.map_reduce_reduce2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.327-0400 m30999| 2015-07-09T14:15:21.326-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.327-0400 m31100| 2015-07-09T14:15:21.327-0400 I COMMAND [conn45] CMD: drop db55.map_reduce_reduce4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.328-0400 m31101| 2015-07-09T14:15:21.328-0400 I COMMAND [repl writer worker 2] CMD: drop db55.map_reduce_reduce3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.328-0400 m31102| 2015-07-09T14:15:21.328-0400 I COMMAND [repl writer worker 9] CMD: drop db55.map_reduce_reduce2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.329-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.329-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.329-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.329-0400 jstests/concurrency/fsm_workloads/map_reduce_reduce.js: Workload completed in 10055 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.329-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.329-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.329-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.330-0400 m30999| 2015-07-09T14:15:21.328-0400 I COMMAND [conn1] DROP: db55.coll55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.330-0400 m30999| 2015-07-09T14:15:21.329-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:21.329-0400-559eba39ca4787b9985d1e3e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465721329), what: "dropCollection.start", ns: "db55.coll55", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.330-0400 m31102| 2015-07-09T14:15:21.329-0400 I COMMAND [repl writer worker 14] CMD: drop db55.map_reduce_reduce3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.331-0400 m31101| 2015-07-09T14:15:21.330-0400 I COMMAND [repl writer worker 11] CMD: drop db55.map_reduce_reduce4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.332-0400 m31102| 2015-07-09T14:15:21.332-0400 I COMMAND [repl writer worker 0] CMD: drop db55.map_reduce_reduce4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.388-0400 m30999| 2015-07-09T14:15:21.388-0400 I SHARDING [conn1] distributed lock 'db55.coll55/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba39ca4787b9985d1e3f [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.389-0400 m31100| 2015-07-09T14:15:21.389-0400 I COMMAND [conn15] CMD: drop db55.coll55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.392-0400 m31200| 2015-07-09T14:15:21.391-0400 I COMMAND [conn18] CMD: drop db55.coll55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.393-0400 m31102| 2015-07-09T14:15:21.393-0400 I COMMAND [repl writer worker 15] CMD: drop db55.coll55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.394-0400 m31101| 2015-07-09T14:15:21.393-0400 I COMMAND [repl writer worker 3] CMD: drop db55.coll55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.398-0400 m31202| 2015-07-09T14:15:21.398-0400 I COMMAND [repl writer worker 11] CMD: drop db55.coll55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.402-0400 m31201| 2015-07-09T14:15:21.401-0400 I COMMAND [repl writer worker 0] CMD: drop db55.coll55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.450-0400 m31100| 2015-07-09T14:15:21.449-0400 I 
SHARDING [conn15] remotely refreshing metadata for db55.coll55 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba2eca4787b9985d1e3c, current metadata version is 2|3||559eba2eca4787b9985d1e3c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.451-0400 m31100| 2015-07-09T14:15:21.451-0400 W SHARDING [conn15] no chunks found when reloading db55.coll55, previous version was 0|0||559eba2eca4787b9985d1e3c, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.451-0400 m31100| 2015-07-09T14:15:21.451-0400 I SHARDING [conn15] dropping metadata for db55.coll55 at shard version 2|3||559eba2eca4787b9985d1e3c, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.454-0400 m31200| 2015-07-09T14:15:21.453-0400 I SHARDING [conn18] remotely refreshing metadata for db55.coll55 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba2eca4787b9985d1e3c, current metadata version is 2|5||559eba2eca4787b9985d1e3c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.455-0400 m31200| 2015-07-09T14:15:21.454-0400 W SHARDING [conn18] no chunks found when reloading db55.coll55, previous version was 0|0||559eba2eca4787b9985d1e3c, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.455-0400 m31200| 2015-07-09T14:15:21.455-0400 I SHARDING [conn18] dropping metadata for db55.coll55 at shard version 2|5||559eba2eca4787b9985d1e3c, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.456-0400 m30999| 2015-07-09T14:15:21.456-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:21.456-0400-559eba39ca4787b9985d1e40", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465721456), what: "dropCollection", ns: "db55.coll55", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.511-0400 m30999| 2015-07-09T14:15:21.511-0400 I SHARDING [conn1] distributed lock 'db55.coll55/bs-osx108-8:30999:1436464534:16807' unlocked. 
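
For context, the mapreduce.shardedfinish entries above record the second phase of a sharded mapReduce with reduce-style output: each shard first runs the map/reduce locally with shardedFirstPass: true and writes its partial results to a tmp.mrs.* collection, and the primary shard then re-reduces those partials into the map_reduce_reduceN output collection before the temporaries are dropped on every replica-set member. A minimal shell sketch of such an invocation follows; the mapper and reducer bodies are truncated in the log ("this.has...", "values...."), so the bodies here are illustrative assumptions rather than the workload's exact code.

// Sketch of a sharded mapReduce with { out: { reduce: ... } }, as recorded above.
// The 'key'/'value' field names and the counting logic are assumptions; the log
// shows only the truncated function headers.
var mapper = function () {
    if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
        emit(this.key, { count: 1 });
    }
};
var reducer = function (key, values) {
    var res = { count: 0 };
    values.forEach(function (v) { res.count += v.count; });
    return res;
};
var finalizer = function (key, reducedValue) {
    return reducedValue; // matches the pass-through finalizer shown in the log
};
db.coll55.mapReduce(mapper, reducer, {
    finalize: finalizer,
    out: { reduce: 'map_reduce_reduce4' } // re-reduce into the existing output collection
});
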
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.567-0400 m30999| 2015-07-09T14:15:21.566-0400 I COMMAND [conn1] DROP DATABASE: db55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.567-0400 m30999| 2015-07-09T14:15:21.567-0400 I SHARDING [conn1] DBConfig::dropDatabase: db55 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.567-0400 m30999| 2015-07-09T14:15:21.567-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:21.567-0400-559eba39ca4787b9985d1e41", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465721567), what: "dropDatabase.start", ns: "db55", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.673-0400 m30999| 2015-07-09T14:15:21.673-0400 I SHARDING [conn1] DBConfig::dropDatabase: db55 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.674-0400 m31100| 2015-07-09T14:15:21.674-0400 I COMMAND [conn160] dropDatabase db55 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.674-0400 m31100| 2015-07-09T14:15:21.674-0400 I COMMAND [conn160] dropDatabase db55 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.675-0400 m30999| 2015-07-09T14:15:21.674-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:21.674-0400-559eba39ca4787b9985d1e42", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465721674), what: "dropDatabase", ns: "db55", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.675-0400 m31102| 2015-07-09T14:15:21.675-0400 I COMMAND [repl writer worker 10] dropDatabase db55 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.675-0400 m31102| 2015-07-09T14:15:21.675-0400 I COMMAND [repl writer worker 10] dropDatabase db55 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.675-0400 m31101| 2015-07-09T14:15:21.675-0400 I COMMAND [repl writer worker 8] dropDatabase db55 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.675-0400 m31101| 2015-07-09T14:15:21.675-0400 I COMMAND [repl writer worker 8] dropDatabase db55 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.767-0400 m31100| 2015-07-09T14:15:21.767-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.772-0400 m31101| 2015-07-09T14:15:21.771-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.772-0400 m31102| 2015-07-09T14:15:21.772-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.813-0400 m31200| 2015-07-09T14:15:21.812-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.816-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.816-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.816-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.816-0400 jstests/concurrency/fsm_workloads/touch_index.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.816-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.817-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.817-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.817-0400 m31201| 2015-07-09T14:15:21.816-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.817-0400 
m31202| 2015-07-09T14:15:21.817-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.825-0400 m30999| 2015-07-09T14:15:21.824-0400 I SHARDING [conn1] distributed lock 'db56/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba39ca4787b9985d1e43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.829-0400 m30999| 2015-07-09T14:15:21.828-0400 I SHARDING [conn1] Placing [db56] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.829-0400 m30999| 2015-07-09T14:15:21.828-0400 I SHARDING [conn1] Enabling sharding for database [db56] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.883-0400 m30999| 2015-07-09T14:15:21.883-0400 I SHARDING [conn1] distributed lock 'db56/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.905-0400 m31100| 2015-07-09T14:15:21.904-0400 I INDEX [conn69] build index on: db56.coll56 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.905-0400 m31100| 2015-07-09T14:15:21.904-0400 I INDEX [conn69] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.917-0400 m31100| 2015-07-09T14:15:21.917-0400 I INDEX [conn69] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.919-0400 m30999| 2015-07-09T14:15:21.919-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db56.coll56", key: { tid: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.923-0400 m30999| 2015-07-09T14:15:21.922-0400 I SHARDING [conn1] distributed lock 'db56.coll56/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba39ca4787b9985d1e44 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.924-0400 m30999| 2015-07-09T14:15:21.923-0400 I SHARDING [conn1] enable sharding on: db56.coll56 with shard key: { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.925-0400 m30999| 2015-07-09T14:15:21.924-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:21.924-0400-559eba39ca4787b9985d1e45", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465721924), what: "shardCollection.start", ns: "db56.coll56", details: { shardKey: { tid: 1.0 }, collection: "db56.coll56", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.936-0400 m31102| 2015-07-09T14:15:21.935-0400 I INDEX [repl writer worker 2] build index on: db56.coll56 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.937-0400 m31102| 2015-07-09T14:15:21.935-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.938-0400 m31101| 2015-07-09T14:15:21.936-0400 I INDEX [repl writer worker 6] build index on: db56.coll56 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.938-0400 m31101| 2015-07-09T14:15:21.936-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.945-0400 m31101| 2015-07-09T14:15:21.945-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.948-0400 m31102| 2015-07-09T14:15:21.947-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:21.978-0400 m30999| 2015-07-09T14:15:21.978-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db56.coll56 using new epoch 559eba39ca4787b9985d1e46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.033-0400 m30999| 2015-07-09T14:15:22.032-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db56.coll56: 0ms sequenceNumber: 250 version: 1|0||559eba39ca4787b9985d1e46 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.089-0400 m30999| 2015-07-09T14:15:22.088-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db56.coll56: 0ms sequenceNumber: 251 version: 1|0||559eba39ca4787b9985d1e46 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.090-0400 m31100| 2015-07-09T14:15:22.090-0400 I SHARDING [conn45] remotely refreshing metadata for db56.coll56 with requested shard version 1|0||559eba39ca4787b9985d1e46, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.092-0400 m31100| 2015-07-09T14:15:22.092-0400 I SHARDING [conn45] collection db56.coll56 was previously unsharded, new metadata loaded with shard version 1|0||559eba39ca4787b9985d1e46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.092-0400 m31100| 2015-07-09T14:15:22.092-0400 I SHARDING [conn45] collection version was loaded at version 1|0||559eba39ca4787b9985d1e46, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.093-0400 m30999| 2015-07-09T14:15:22.092-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.092-0400-559eba3aca4787b9985d1e47", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465722092), what: "shardCollection", ns: "db56.coll56", details: { version: "1|0||559eba39ca4787b9985d1e46" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.147-0400 m30999| 2015-07-09T14:15:22.146-0400 I SHARDING [conn1] distributed lock 'db56.coll56/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.156-0400 m31100| 2015-07-09T14:15:22.156-0400 I INDEX [conn45] build index on: db56.coll56 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.157-0400 m31100| 2015-07-09T14:15:22.156-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.163-0400 m31100| 2015-07-09T14:15:22.162-0400 I INDEX [conn45] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.168-0400 m31200| 2015-07-09T14:15:22.168-0400 I INDEX [conn32] build index on: db56.coll56 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.169-0400 m31200| 2015-07-09T14:15:22.168-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.176-0400 m31102| 2015-07-09T14:15:22.175-0400 I INDEX [repl writer worker 1] build index on: db56.coll56 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.177-0400 m31102| 2015-07-09T14:15:22.176-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.184-0400 m31101| 2015-07-09T14:15:22.184-0400 I INDEX [repl writer worker 9] build index on: db56.coll56 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.185-0400 m31101| 2015-07-09T14:15:22.184-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.186-0400 m31102| 2015-07-09T14:15:22.184-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.187-0400 m31200| 2015-07-09T14:15:22.186-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.189-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.213-0400 m31101| 2015-07-09T14:15:22.213-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.224-0400 m31202| 2015-07-09T14:15:22.223-0400 I INDEX [repl writer worker 4] build index on: db56.coll56 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.224-0400 m31202| 2015-07-09T14:15:22.223-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.285-0400 m30999| 2015-07-09T14:15:22.282-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63817 #357 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.303-0400 m30998| 2015-07-09T14:15:22.293-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63818 #357 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.325-0400 m31201| 2015-07-09T14:15:22.325-0400 I INDEX [repl writer worker 15] build index on: db56.coll56 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db56.coll56" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.325-0400 m31201| 2015-07-09T14:15:22.325-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.326-0400 m31202| 2015-07-09T14:15:22.326-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.327-0400 m30999| 2015-07-09T14:15:22.327-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63819 #358 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.330-0400 m30999| 2015-07-09T14:15:22.330-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63820 #359 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.331-0400 m30998| 2015-07-09T14:15:22.330-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63822 #358 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.332-0400 m30998| 2015-07-09T14:15:22.332-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63823 #359 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.332-0400 m30998| 2015-07-09T14:15:22.332-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63824 #360 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.333-0400 m30998| 2015-07-09T14:15:22.333-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63826 #361 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.335-0400 m30999| 2015-07-09T14:15:22.335-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63821 #360 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.336-0400 m30999| 2015-07-09T14:15:22.336-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63825 #361 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.349-0400 m31201| 2015-07-09T14:15:22.348-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.359-0400 setting random seed: 2483471077866
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.359-0400 setting random seed: 3041737857274
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.359-0400 setting random seed: 9110424658283
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.359-0400 setting random seed: 1431407779455
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.366-0400 setting random seed: 7670560502447
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.366-0400 setting random seed: 3708754787221
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.369-0400 setting random seed: 8907303069718
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.369-0400 setting random seed: 2488191020675
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.370-0400 setting random seed: 9698425140231
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.371-0400 setting random seed: 5042762462981
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.378-0400 m30998| 2015-07-09T14:15:22.378-0400 I SHARDING [conn361] ChunkManager: time to load chunks for db56.coll56: 0ms sequenceNumber: 66 version: 1|0||559eba39ca4787b9985d1e46 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.471-0400 m31100| 2015-07-09T14:15:22.468-0400 I COMMAND [conn69] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.478-0400 m31100| 2015-07-09T14:15:22.476-0400 I COMMAND [conn68] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.478-0400 m31100| 2015-07-09T14:15:22.477-0400 I SHARDING [conn187] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.479-0400 m31100| 2015-07-09T14:15:22.478-0400 I SHARDING [conn32] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.480-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.480-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.480-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.480-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.480-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.480-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.480-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.481-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.481-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.481-0400 m31100| 2015-07-09T14:15:22.479-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.481-0400 m31100| 2015-07-09T14:15:22.480-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.481-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.482-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.482-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.482-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.482-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.483-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.483-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.483-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.483-0400 m31100| 2015-07-09T14:15:22.481-0400 W SHARDING [conn32] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.483-0400 m31100| 2015-07-09T14:15:22.481-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.484-0400 m31100| 2015-07-09T14:15:22.482-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.484-0400 m31100| 2015-07-09T14:15:22.482-0400 I COMMAND [conn25] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.484-0400 m31100| 2015-07-09T14:15:22.483-0400 I SHARDING [conn39] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.485-0400 m31100| 2015-07-09T14:15:22.484-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.486-0400 m31100| 2015-07-09T14:15:22.485-0400 I COMMAND [conn29] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.486-0400 m31100| 2015-07-09T14:15:22.486-0400 I SHARDING [conn15] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.486-0400 m31100| 2015-07-09T14:15:22.486-0400 I SHARDING [conn187] could not acquire lock 'db56.coll56/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.486-0400 m31100| 2015-07-09T14:15:22.486-0400 I SHARDING [conn187] distributed lock 'db56.coll56/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.487-0400 m31100| 2015-07-09T14:15:22.486-0400 W SHARDING [conn187] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.488-0400 m31100| 2015-07-09T14:15:22.487-0400 I COMMAND [conn30] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 118ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.489-0400 m31100| 2015-07-09T14:15:22.487-0400 I SHARDING [conn39] could not acquire lock 'db56.coll56/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.489-0400 m31100| 2015-07-09T14:15:22.487-0400 I SHARDING [conn39] distributed lock 'db56.coll56/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.489-0400 m31100| 2015-07-09T14:15:22.487-0400 W SHARDING [conn39] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.490-0400 m30999| 2015-07-09T14:15:22.488-0400 W SHARDING [conn357] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.490-0400 m31100| 2015-07-09T14:15:22.488-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.490-0400 m30998| 2015-07-09T14:15:22.488-0400 W SHARDING [conn358] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.491-0400 m31100| 2015-07-09T14:15:22.488-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.491-0400 m31100| 2015-07-09T14:15:22.489-0400 W SHARDING [conn15] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.491-0400 m31100| 2015-07-09T14:15:22.490-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.492-0400 m30999| 2015-07-09T14:15:22.491-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.492-0400 m31100| 2015-07-09T14:15:22.491-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.492-0400 m30999| 2015-07-09T14:15:22.491-0400 W SHARDING [conn359] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.498-0400 m31100| 2015-07-09T14:15:22.496-0400 I SHARDING [conn32] distributed lock 'db56.coll56/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba3a792e00bb67274a36
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.498-0400 m31100| 2015-07-09T14:15:22.497-0400 I SHARDING [conn32] remotely refreshing metadata for db56.coll56 based on current shard version 1|0||559eba39ca4787b9985d1e46, current metadata version is 1|0||559eba39ca4787b9985d1e46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.509-0400 m31100| 2015-07-09T14:15:22.508-0400 I SHARDING [conn32] metadata of collection db56.coll56 already up to date (shard version : 1|0||559eba39ca4787b9985d1e46, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.509-0400 m31100| 2015-07-09T14:15:22.508-0400 I SHARDING [conn32] splitChunk accepted at version 1|0||559eba39ca4787b9985d1e46
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.513-0400 m31100| 2015-07-09T14:15:22.513-0400 I COMMAND [conn146] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 4682 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 126ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.513-0400 m31100| 2015-07-09T14:15:22.513-0400 I SHARDING [conn39] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.514-0400 m31100| 2015-07-09T14:15:22.513-0400 I COMMAND [conn27] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 129ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.514-0400 m31100| 2015-07-09T14:15:22.514-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.515-0400 m31100| 2015-07-09T14:15:22.514-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.515-0400 m31100| 2015-07-09T14:15:22.514-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.515-0400 m31100| 2015-07-09T14:15:22.514-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.515-0400 m31100| 2015-07-09T14:15:22.515-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.515-0400 m31100| 2015-07-09T14:15:22.515-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.516-0400 m31100| 2015-07-09T14:15:22.515-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.516-0400 m31100| 2015-07-09T14:15:22.515-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.518-0400 m31100| 2015-07-09T14:15:22.516-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.519-0400 m31100| 2015-07-09T14:15:22.516-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.519-0400 m31100| 2015-07-09T14:15:22.518-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.519-0400 m31100| 2015-07-09T14:15:22.518-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.520-0400 m31100| 2015-07-09T14:15:22.518-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.520-0400 m31100| 2015-07-09T14:15:22.518-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.520-0400 m31100| 2015-07-09T14:15:22.518-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.520-0400 m31100| 2015-07-09T14:15:22.518-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.520-0400 m31100| 2015-07-09T14:15:22.518-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.521-0400 m31100| 2015-07-09T14:15:22.519-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.528-0400 m31100| 2015-07-09T14:15:22.527-0400 W SHARDING [conn39] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.528-0400 m31100| 2015-07-09T14:15:22.527-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.529-0400 m30998| 2015-07-09T14:15:22.528-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.530-0400 m30998| 2015-07-09T14:15:22.528-0400 W SHARDING [conn359] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.540-0400 m31100| 2015-07-09T14:15:22.539-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.539-0400-559eba3a792e00bb67274a38", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722539), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 10, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.585-0400 m31100| 2015-07-09T14:15:22.585-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.585-0400 m31100| 2015-07-09T14:15:22.585-0400 I SHARDING [conn15] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.588-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.588-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.588-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.589-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.589-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.589-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.589-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.590-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.590-0400 m31100| 2015-07-09T14:15:22.587-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.590-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.590-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.590-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.590-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.591-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.591-0400 m31100| 2015-07-09T14:15:22.588-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.591-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.591-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.591-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.592-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.592-0400 m31100| 2015-07-09T14:15:22.588-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.592-0400 m31100| 2015-07-09T14:15:22.589-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.592-0400 m31100| 2015-07-09T14:15:22.589-0400 W SHARDING [conn15] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.593-0400 m30999| 2015-07-09T14:15:22.589-0400 W SHARDING [conn361] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.593-0400 m31100| 2015-07-09T14:15:22.590-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.593-0400 m30999| 2015-07-09T14:15:22.590-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.598-0400 m31100| 2015-07-09T14:15:22.598-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.598-0400-559eba3a792e00bb67274a39", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722598), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 10, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.603-0400 m31100| 2015-07-09T14:15:22.602-0400 I COMMAND [conn147] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 9353 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 127ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.603-0400 m31100| 2015-07-09T14:15:22.602-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.605-0400 m31100| 2015-07-09T14:15:22.604-0400 I COMMAND [conn30] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 10269 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.609-0400 m31100| 2015-07-09T14:15:22.605-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.610-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.610-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.610-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.610-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.610-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.611-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.611-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.611-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.611-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.611-0400 m31100| 2015-07-09T14:15:22.606-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.611-0400 m31100| 2015-07-09T14:15:22.606-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.612-0400 m31100| 2015-07-09T14:15:22.607-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.612-0400 m30998| 2015-07-09T14:15:22.608-0400 W SHARDING [conn361] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.612-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.612-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.612-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.615-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.616-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.616-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.616-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.616-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.616-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.616-0400 m31100| 2015-07-09T14:15:22.608-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.617-0400 m31100| 2015-07-09T14:15:22.609-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.617-0400 m31100| 2015-07-09T14:15:22.610-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.617-0400 m30999| 2015-07-09T14:15:22.610-0400 W SHARDING [conn359] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.618-0400 m31100| 2015-07-09T14:15:22.614-0400 I COMMAND [conn69] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 5151 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.619-0400 m31100| 2015-07-09T14:15:22.615-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.619-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.620-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.620-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.620-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.620-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.620-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.621-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.621-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.621-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.621-0400 m31100| 2015-07-09T14:15:22.618-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.622-0400 m31100| 2015-07-09T14:15:22.619-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.622-0400 m31100| 2015-07-09T14:15:22.621-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.622-0400 m30999| 2015-07-09T14:15:22.621-0400 W SHARDING [conn358] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.630-0400 m31100| 2015-07-09T14:15:22.626-0400 I COMMAND [conn22] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.630-0400 m31100| 2015-07-09T14:15:22.626-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.630-0400 m31100| 2015-07-09T14:15:22.629-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.631-0400 m31100| 2015-07-09T14:15:22.629-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.631-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.631-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.631-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.632-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.632-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.632-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.632-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.632-0400 m31100| 2015-07-09T14:15:22.630-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.632-0400 m31100| 2015-07-09T14:15:22.630-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.633-0400 m31100| 2015-07-09T14:15:22.631-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.633-0400 m30999| 2015-07-09T14:15:22.631-0400 W SHARDING [conn357] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.638-0400 m31100| 2015-07-09T14:15:22.638-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.641-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.641-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.642-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.642-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.642-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.642-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.642-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.642-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.642-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.643-0400 m31100| 2015-07-09T14:15:22.641-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.643-0400 m31100| 2015-07-09T14:15:22.642-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.643-0400 m31100| 2015-07-09T14:15:22.643-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.644-0400 m30998| 2015-07-09T14:15:22.643-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.644-0400 m31100| 2015-07-09T14:15:22.643-0400 I COMMAND [conn27] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 459 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.644-0400 m31100| 2015-07-09T14:15:22.644-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.648-0400 m31100| 2015-07-09T14:15:22.647-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.648-0400 m31100| 2015-07-09T14:15:22.647-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.648-0400 m31100| 2015-07-09T14:15:22.647-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.648-0400 m31100| 2015-07-09T14:15:22.647-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.648-0400 m31100| 2015-07-09T14:15:22.647-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.648-0400 m31100| 2015-07-09T14:15:22.647-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.649-0400 m31100| 2015-07-09T14:15:22.647-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.649-0400 m31100| 2015-07-09T14:15:22.648-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.649-0400 m31100| 2015-07-09T14:15:22.648-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.649-0400 m31100| 2015-07-09T14:15:22.648-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.649-0400 m31100| 2015-07-09T14:15:22.648-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.650-0400 m31100| 2015-07-09T14:15:22.650-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.650-0400 m31100| 2015-07-09T14:15:22.650-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.650-0400-559eba3a792e00bb67274a3a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722650), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 10, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.651-0400 m30998| 2015-07-09T14:15:22.650-0400 W SHARDING [conn358] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.654-0400 m31100| 2015-07-09T14:15:22.653-0400 I COMMAND [conn25] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 108ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.654-0400 m31100| 2015-07-09T14:15:22.654-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.657-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.657-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.658-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.658-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.658-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.658-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.659-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.659-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.659-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.659-0400 m31100| 2015-07-09T14:15:22.657-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.659-0400 m31100| 2015-07-09T14:15:22.658-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.659-0400 m31100| 2015-07-09T14:15:22.659-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.660-0400 m30998| 2015-07-09T14:15:22.659-0400 W SHARDING [conn359] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.694-0400 m31100| 2015-07-09T14:15:22.694-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.696-0400 m31100| 2015-07-09T14:15:22.696-0400 I SHARDING [conn15] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.700-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.701-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.701-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.701-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.701-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.702-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.702-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.702-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.702-0400 m31100| 2015-07-09T14:15:22.700-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.704-0400 m31100| 2015-07-09T14:15:22.702-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.702-0400-559eba3a792e00bb67274a3b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722702), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 10, chunk: { min: { tid: 3.0 }, max: { tid: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.704-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.704-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.704-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.704-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.704-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.705-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.705-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.705-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.705-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.705-0400 m31100| 2015-07-09T14:15:22.703-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.706-0400 m31100| 2015-07-09T14:15:22.703-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.706-0400 m31100| 2015-07-09T14:15:22.705-0400 W SHARDING [conn15] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.706-0400 m30999| 2015-07-09T14:15:22.705-0400 W SHARDING [conn361] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.707-0400 m31100| 2015-07-09T14:15:22.706-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.708-0400 m31100| 2015-07-09T14:15:22.707-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.708-0400 m30999| 2015-07-09T14:15:22.707-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.710-0400 m30999| 2015-07-09T14:15:22.709-0400 I NETWORK [conn361] end connection 127.0.0.1:63825 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.737-0400 m31100| 2015-07-09T14:15:22.736-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.738-0400 m31100| 2015-07-09T14:15:22.737-0400 I COMMAND [conn22] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.739-0400 m31100| 2015-07-09T14:15:22.738-0400 I COMMAND [conn69] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.739-0400 m31100| 2015-07-09T14:15:22.738-0400 I SHARDING [conn15] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.739-0400 m31100| 2015-07-09T14:15:22.738-0400 I SHARDING [conn187] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.740-0400 m31100| 2015-07-09T14:15:22.739-0400 I COMMAND [conn147] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 115ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.743-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.743-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.744-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.744-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.744-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.744-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.744-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.744-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.745-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.745-0400 m31100| 2015-07-09T14:15:22.742-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.745-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.745-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.746-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.748-0400 m31100| 2015-07-09T14:15:22.742-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.749-0400 m31100| 2015-07-09T14:15:22.743-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.749-0400 m31100| 2015-07-09T14:15:22.743-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.749-0400 m31100| 2015-07-09T14:15:22.743-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.749-0400 m31100| 2015-07-09T14:15:22.743-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.750-0400 m31100| 2015-07-09T14:15:22.743-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.750-0400 m31100| 2015-07-09T14:15:22.743-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.750-0400 m31100| 2015-07-09T14:15:22.743-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.750-0400 m31100| 2015-07-09T14:15:22.743-0400 W SHARDING [conn187] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.751-0400 m30999| 2015-07-09T14:15:22.744-0400 W SHARDING [conn358] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.751-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.752-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.752-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.752-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.752-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.753-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.753-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.753-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.753-0400 m31100| 2015-07-09T14:15:22.745-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.753-0400 m31100| 2015-07-09T14:15:22.746-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.754-0400 m31100| 2015-07-09T14:15:22.747-0400 W SHARDING [conn15] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
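All of these failures are the same benign race: error code 125 (LockBusy) means the distributed collection lock for db56.coll56 is already held by the one splitChunk that won, and that request is still working through its 10-way multi-split (the "multi-split ... number: N, of: 10" events). The losing requests are simply retried on the next insert burst. A hedged sketch of the same retry-on-LockBusy pattern as client-side shell code; splitWithRetry is illustrative and not part of this test:

    // Illustrative helper (not from the test): retry a manual split while a
    // competing split holds the collection lock (error code 125, LockBusy).
    function splitWithRetry(ns, middle, attempts) {
        for (var i = 0; i < attempts; i++) {
            var res = db.adminCommand({ split: ns, middle: middle });
            if (res.ok) {
                return res;               // split succeeded
            }
            if (res.code !== 125) {
                throw Error(tojson(res)); // only LockBusy is worth retrying
            }
            sleep(100);                   // back off, then try again
        }
        throw Error("collection lock still busy after " + attempts + " attempts");
    }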
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.754-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.754-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.754-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.754-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.754-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.755-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.755-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.755-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.755-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.755-0400 m31100| 2015-07-09T14:15:22.748-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.755-0400 m30999| 2015-07-09T14:15:22.748-0400 W SHARDING [conn359] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.756-0400 m31100| 2015-07-09T14:15:22.748-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.756-0400 m31100| 2015-07-09T14:15:22.748-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.756-0400 m31100| 2015-07-09T14:15:22.749-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.757-0400 m30998| 2015-07-09T14:15:22.751-0400 W SHARDING [conn361] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.757-0400 m31100| 2015-07-09T14:15:22.752-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.757-0400 m30999| 2015-07-09T14:15:22.752-0400 W SHARDING [conn357] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.758-0400 m31100| 2015-07-09T14:15:22.754-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.754-0400-559eba3a792e00bb67274a3c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722754), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 10, chunk: { min: { tid: 4.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.758-0400 m31100| 2015-07-09T14:15:22.757-0400 I COMMAND [conn25] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.758-0400 m31100| 2015-07-09T14:15:22.758-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.764-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.764-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.765-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.765-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.765-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.766-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.766-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.766-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.766-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.768-0400 m30998| 2015-07-09T14:15:22.767-0400 W SHARDING [conn358] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.769-0400 m30999| 2015-07-09T14:15:22.769-0400 I NETWORK [conn357] end connection 127.0.0.1:63817 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.769-0400 m31100| 2015-07-09T14:15:22.764-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.769-0400 m31100| 2015-07-09T14:15:22.765-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.770-0400 m31100| 2015-07-09T14:15:22.767-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.770-0400 m31100| 2015-07-09T14:15:22.769-0400 I COMMAND [conn27] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 122ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.776-0400 m31100| 2015-07-09T14:15:22.770-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.776-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.776-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.776-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.776-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.776-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.777-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.777-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.777-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.777-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.777-0400 m31100| 2015-07-09T14:15:22.775-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.780-0400 m31100| 2015-07-09T14:15:22.777-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.781-0400 m31100| 2015-07-09T14:15:22.779-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.781-0400 m30998| 2015-07-09T14:15:22.779-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.785-0400 m31100| 2015-07-09T14:15:22.784-0400 I COMMAND [conn146] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 122ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.785-0400 m31100| 2015-07-09T14:15:22.785-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.791-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.792-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.792-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.792-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.792-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.792-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.792-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.792-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.793-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.793-0400 m31100| 2015-07-09T14:15:22.791-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.793-0400 m31100| 2015-07-09T14:15:22.793-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.796-0400 m31100| 2015-07-09T14:15:22.794-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.796-0400 m30998| 2015-07-09T14:15:22.795-0400 W SHARDING [conn359] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.807-0400 m31100| 2015-07-09T14:15:22.807-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.807-0400-559eba3a792e00bb67274a3d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722807), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 10, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.826-0400 m30998| 2015-07-09T14:15:22.820-0400 I NETWORK [conn359] end connection 127.0.0.1:63823 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.860-0400 m31100| 2015-07-09T14:15:22.858-0400 I COMMAND [conn68] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 140ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.861-0400 m31100| 2015-07-09T14:15:22.858-0400 I SHARDING [conn37] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.861-0400 m31100| 2015-07-09T14:15:22.859-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.859-0400-559eba3a792e00bb67274a3e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722859), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 10, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.864-0400 m31100| 2015-07-09T14:15:22.864-0400 I COMMAND [conn69] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 116ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.864-0400 m31100| 2015-07-09T14:15:22.864-0400 I SHARDING [conn15] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.866-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.866-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.866-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.866-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.867-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.867-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.867-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.867-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.867-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.868-0400 m31100| 2015-07-09T14:15:22.866-0400 W SHARDING [conn37] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.868-0400 m31100| 2015-07-09T14:15:22.868-0400 I COMMAND [conn147] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.870-0400 m31100| 2015-07-09T14:15:22.869-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.870-0400 m31100| 2015-07-09T14:15:22.869-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.870-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.871-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.871-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.871-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.871-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.871-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.872-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.872-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.872-0400 m31100| 2015-07-09T14:15:22.870-0400 W SHARDING [conn15] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.872-0400 m31100| 2015-07-09T14:15:22.870-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.876-0400 m31100| 2015-07-09T14:15:22.870-0400 I COMMAND [conn22] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 118ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.876-0400 m31100| 2015-07-09T14:15:22.872-0400 W SHARDING [conn37] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.876-0400 m31100| 2015-07-09T14:15:22.872-0400 I SHARDING [conn187] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.879-0400 m30999| 2015-07-09T14:15:22.873-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.879-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.879-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.879-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.879-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.880-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.880-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.880-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.880-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.880-0400 m30999| 2015-07-09T14:15:22.877-0400 W SHARDING [conn358] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.881-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.881-0400 m31100| 2015-07-09T14:15:22.876-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.881-0400 m31100| 2015-07-09T14:15:22.877-0400 W SHARDING [conn15] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.881-0400 m31100| 2015-07-09T14:15:22.877-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.881-0400 m31100| 2015-07-09T14:15:22.877-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.881-0400 m31100| 2015-07-09T14:15:22.877-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.881-0400 m31100| 2015-07-09T14:15:22.877-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.882-0400 m31100| 2015-07-09T14:15:22.878-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.882-0400 m31100| 2015-07-09T14:15:22.878-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.882-0400 m31100| 2015-07-09T14:15:22.878-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.882-0400 m31100| 2015-07-09T14:15:22.878-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.882-0400 m31100| 2015-07-09T14:15:22.878-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.883-0400 m31100| 2015-07-09T14:15:22.878-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.883-0400 m31100| 2015-07-09T14:15:22.881-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.884-0400 m31100| 2015-07-09T14:15:22.883-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.884-0400 m30998| 2015-07-09T14:15:22.884-0400 W SHARDING [conn361] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.884-0400 m30999| 2015-07-09T14:15:22.884-0400 I NETWORK [conn358] end connection 127.0.0.1:63819 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.885-0400 m31100| 2015-07-09T14:15:22.885-0400 W SHARDING [conn187] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.885-0400 m30999| 2015-07-09T14:15:22.885-0400 W SHARDING [conn359] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.898-0400 m30998| 2015-07-09T14:15:22.897-0400 I NETWORK [conn361] end connection 127.0.0.1:63826 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.902-0400 m30999| 2015-07-09T14:15:22.900-0400 I NETWORK [conn359] end connection 127.0.0.1:63820 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.913-0400 m31100| 2015-07-09T14:15:22.912-0400 I COMMAND [conn27] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 134ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.913-0400 m31100| 2015-07-09T14:15:22.912-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.914-0400 m31100| 2015-07-09T14:15:22.913-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.913-0400-559eba3a792e00bb67274a3f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722913), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 10, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.916-0400 m31100| 2015-07-09T14:15:22.915-0400 I COMMAND [conn146] command db56.$cmd command: insert { insert: "coll56", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559eba39ca4787b9985d1e46') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 124ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.916-0400 m31100| 2015-07-09T14:15:22.916-0400 I SHARDING [conn39] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.921-0400 m31100| 2015-07-09T14:15:22.920-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 }
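By this point the winning split has logged "multi-split" changelog events number 3 through 8 of 10, one per chunk boundary it has committed. Each of these events is persisted in the config servers' changelog, so the split's progress can be reconstructed after the fact. A sketch, assuming a shell connected to one of the mongos processes:

    // Sketch: list the committed sub-splits for db56.coll56 in order,
    // from the config server changelog.
    db.getSiblingDB("config").changelog
        .find({ what: "multi-split", ns: "db56.coll56" })
        .sort({ time: 1 })
        .forEach(function(ev) {
            print(ev.details.number + " of " + ev.details.of + ": " +
                  tojson(ev.details.chunk.min) + " --> " + tojson(ev.details.chunk.max));
        });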
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.921-0400 m31100| 2015-07-09T14:15:22.920-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.922-0400 m31100| 2015-07-09T14:15:22.920-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.922-0400 m31100| 2015-07-09T14:15:22.920-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.923-0400 m31100| 2015-07-09T14:15:22.920-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.923-0400 m31100| 2015-07-09T14:15:22.920-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.923-0400 m31100| 2015-07-09T14:15:22.920-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.923-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.923-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.923-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.923-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.924-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.924-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.924-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.924-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.924-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.924-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.924-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.925-0400 m31100| 2015-07-09T14:15:22.921-0400 W SHARDING [conn39] possible low cardinality key detected in 
db56.coll56 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.925-0400 m31100| 2015-07-09T14:15:22.924-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.926-0400 m31100| 2015-07-09T14:15:22.925-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.927-0400 m31100| 2015-07-09T14:15:22.926-0400 W SHARDING [conn39] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.928-0400 m31100| 2015-07-09T14:15:22.926-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.928-0400 m30998| 2015-07-09T14:15:22.926-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.928-0400 m30998| 2015-07-09T14:15:22.927-0400 W SHARDING [conn358] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.937-0400 m30998| 2015-07-09T14:15:22.937-0400 I NETWORK [conn358] end connection 127.0.0.1:63822 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.958-0400 m31100| 2015-07-09T14:15:22.955-0400 I SHARDING [conn187] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.962-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING 
[conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.962-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.962-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.962-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.963-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.963-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.963-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.963-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.963-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.963-0400 m31100| 2015-07-09T14:15:22.961-0400 W SHARDING [conn187] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.964-0400 m31100| 2015-07-09T14:15:22.962-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.965-0400 m31100| 2015-07-09T14:15:22.964-0400 W SHARDING [conn187] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken. 
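The repeated "possible low cardinality key detected" warnings are the split-point scanner observing that the shard key tid takes only about ten distinct values (apparently one per workload thread), so once each value occupies its own chunk no further splits are possible. A quick way to see the cardinality it is warning about, as a minimal sketch assuming a shell connected to a mongos:

    // Counts the distinct shard-key values; roughly 10 here (tid 0 through 9).
    db.getSiblingDB("db56").getCollection("coll56").distinct("tid").length;
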
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.966-0400 m30999| 2015-07-09T14:15:22.964-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.966-0400 m31100| 2015-07-09T14:15:22.965-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:22.965-0400-559eba3a792e00bb67274a40", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465722965), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 10, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.969-0400 m30999| 2015-07-09T14:15:22.969-0400 I NETWORK [conn360] end connection 127.0.0.1:63821 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.969-0400 m31100| 2015-07-09T14:15:22.969-0400 I SHARDING [conn35] request split points lookup for chunk db56.coll56 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.976-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.976-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.976-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.976-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.977-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.977-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.977-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.977-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.977-0400 m31100| 2015-07-09T14:15:22.976-0400 W SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.977-0400 m31100| 2015-07-09T14:15:22.976-0400 W 
SHARDING [conn35] possible low cardinality key detected in db56.coll56 - key is { tid: 9.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.978-0400 m31100| 2015-07-09T14:15:22.978-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.980-0400 m31100| 2015-07-09T14:15:22.980-0400 W SHARDING [conn35] could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db56.coll56 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.981-0400 m30998| 2015-07-09T14:15:22.980-0400 W SHARDING [conn360] splitChunk failed - cmd: { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db56.coll56 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:22.985-0400 m30998| 2015-07-09T14:15:22.985-0400 I NETWORK [conn360] end connection 127.0.0.1:63824 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.020-0400 m31100| 2015-07-09T14:15:23.019-0400 I SHARDING [conn32] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:23.019-0400-559eba3b792e00bb67274a41", server: "bs-osx108-8", clientAddr: "127.0.0.1:62634", time: new Date(1436465723019), what: "multi-split", ns: "db56.coll56", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 10, of: 10, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eba39ca4787b9985d1e46') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.074-0400 m31100| 2015-07-09T14:15:23.073-0400 I SHARDING [conn32] distributed lock 'db56.coll56/bs-osx108-8:31100:1436464536:197041335' unlocked. 
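One contender (conn32) did win the distributed lock and is walking the chunk through a ten-way "multi-split", logging one metadata event per resulting chunk (8 of 10, 9 of 10, 10 of 10 above) before releasing the lock. Each "about to log metadata event" record is written to the config server's changelog, which can be inspected after the fact; a minimal sketch, assuming a shell connected to a mongos:

    // One document per "multi-split" event logged above.
    var configDB = db.getSiblingDB("config");
    configDB.changelog.find({ ns: "db56.coll56", what: "multi-split" })
                      .sort({ time: 1 })
                      .forEach(printjson);
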
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.074-0400 m31100| 2015-07-09T14:15:23.073-0400 I COMMAND [conn32] command db56.coll56 command: splitChunk { splitChunk: "db56.coll56", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba39ca4787b9985d1e46') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 23227 } } } protocol:op_command 591ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.076-0400 m30998| 2015-07-09T14:15:23.076-0400 I SHARDING [conn357] ChunkManager: time to load chunks for db56.coll56: 1ms sequenceNumber: 67 version: 1|10||559eba39ca4787b9985d1e46 based on: 1|0||559eba39ca4787b9985d1e46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.077-0400 m30998| 2015-07-09T14:15:23.077-0400 I SHARDING [conn357] autosplitted db56.coll56 shard: ns: db56.coll56, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 10 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.150-0400 m30998| 2015-07-09T14:15:23.150-0400 I NETWORK [conn357] end connection 127.0.0.1:63818 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.169-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.169-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.169-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.170-0400 jstests/concurrency/fsm_workloads/touch_index.js: Workload completed in 980 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.170-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.170-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.170-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.170-0400 m30999| 2015-07-09T14:15:23.169-0400 I COMMAND [conn1] DROP: db56.coll56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.171-0400 m30999| 2015-07-09T14:15:23.169-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:23.169-0400-559eba3bca4787b9985d1e48", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465723169), what: "dropCollection.start", ns: "db56.coll56", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.228-0400 m30999| 2015-07-09T14:15:23.227-0400 I SHARDING [conn1] distributed lock 'db56.coll56/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba3bca4787b9985d1e49 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.229-0400 m31100| 2015-07-09T14:15:23.229-0400 I COMMAND [conn187] CMD: drop db56.coll56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.232-0400 m31200| 2015-07-09T14:15:23.232-0400 I COMMAND [conn18] CMD: drop db56.coll56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.233-0400 m31101| 2015-07-09T14:15:23.233-0400 I COMMAND [repl writer worker 8] CMD: drop db56.coll56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.233-0400 m31102| 2015-07-09T14:15:23.233-0400 I COMMAND [repl writer worker 7] CMD: drop db56.coll56 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:23.236-0400 m31201| 2015-07-09T14:15:23.235-0400 I COMMAND [repl writer worker 7] CMD: drop db56.coll56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.236-0400 m31202| 2015-07-09T14:15:23.236-0400 I COMMAND [repl writer worker 6] CMD: drop db56.coll56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.289-0400 m31100| 2015-07-09T14:15:23.289-0400 I SHARDING [conn187] remotely refreshing metadata for db56.coll56 with requested shard version 0|0||000000000000000000000000, current shard version is 1|10||559eba39ca4787b9985d1e46, current metadata version is 1|10||559eba39ca4787b9985d1e46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.291-0400 m31100| 2015-07-09T14:15:23.291-0400 W SHARDING [conn187] no chunks found when reloading db56.coll56, previous version was 0|0||559eba39ca4787b9985d1e46, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.291-0400 m31100| 2015-07-09T14:15:23.291-0400 I SHARDING [conn187] dropping metadata for db56.coll56 at shard version 1|10||559eba39ca4787b9985d1e46, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.293-0400 m30999| 2015-07-09T14:15:23.293-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:23.293-0400-559eba3bca4787b9985d1e4a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465723293), what: "dropCollection", ns: "db56.coll56", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.347-0400 m30999| 2015-07-09T14:15:23.347-0400 I SHARDING [conn1] distributed lock 'db56.coll56/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.404-0400 m30999| 2015-07-09T14:15:23.404-0400 I COMMAND [conn1] DROP DATABASE: db56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.405-0400 m30999| 2015-07-09T14:15:23.404-0400 I SHARDING [conn1] DBConfig::dropDatabase: db56 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.405-0400 m30999| 2015-07-09T14:15:23.404-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:23.404-0400-559eba3bca4787b9985d1e4b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465723404), what: "dropDatabase.start", ns: "db56", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.512-0400 m30999| 2015-07-09T14:15:23.512-0400 I SHARDING [conn1] DBConfig::dropDatabase: db56 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.513-0400 m31100| 2015-07-09T14:15:23.512-0400 I COMMAND [conn160] dropDatabase db56 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.513-0400 m31100| 2015-07-09T14:15:23.512-0400 I COMMAND [conn160] dropDatabase db56 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.513-0400 m30999| 2015-07-09T14:15:23.513-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:23.513-0400-559eba3bca4787b9985d1e4c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465723513), what: "dropDatabase", ns: "db56", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.514-0400 m31102| 2015-07-09T14:15:23.514-0400 I COMMAND [repl writer worker 9] dropDatabase db56 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.514-0400 m31102| 2015-07-09T14:15:23.514-0400 I COMMAND [repl writer worker 9] dropDatabase db56 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.514-0400 
m31101| 2015-07-09T14:15:23.514-0400 I COMMAND [repl writer worker 3] dropDatabase db56 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.514-0400 m31101| 2015-07-09T14:15:23.514-0400 I COMMAND [repl writer worker 3] dropDatabase db56 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.608-0400 m31100| 2015-07-09T14:15:23.608-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.612-0400 m31102| 2015-07-09T14:15:23.611-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.612-0400 m31101| 2015-07-09T14:15:23.611-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.645-0400 m31200| 2015-07-09T14:15:23.644-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.648-0400 m31202| 2015-07-09T14:15:23.648-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.648-0400 m31201| 2015-07-09T14:15:23.648-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.649-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.649-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.649-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.649-0400 jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.649-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.649-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.649-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.657-0400 m30999| 2015-07-09T14:15:23.656-0400 I SHARDING [conn1] distributed lock 'db57/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba3bca4787b9985d1e4d [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.661-0400 m30999| 2015-07-09T14:15:23.661-0400 I SHARDING [conn1] Placing [db57] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.661-0400 m30999| 2015-07-09T14:15:23.661-0400 I SHARDING [conn1] Enabling sharding for database [db57] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.714-0400 m30999| 2015-07-09T14:15:23.714-0400 I SHARDING [conn1] distributed lock 'db57/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.737-0400 m31100| 2015-07-09T14:15:23.737-0400 I INDEX [conn22] build index on: db57.coll57 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db57.coll57" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.737-0400 m31100| 2015-07-09T14:15:23.737-0400 I INDEX [conn22] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.746-0400 m31100| 2015-07-09T14:15:23.745-0400 I INDEX [conn22] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.747-0400 m30999| 2015-07-09T14:15:23.746-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db57.coll57", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.749-0400 m30999| 2015-07-09T14:15:23.749-0400 I SHARDING [conn1] distributed lock 'db57.coll57/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba3bca4787b9985d1e4e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.751-0400 m30999| 2015-07-09T14:15:23.750-0400 I SHARDING [conn1] enable sharding on: db57.coll57 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.752-0400 m30999| 2015-07-09T14:15:23.750-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:23.750-0400-559eba3bca4787b9985d1e4f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465723750), what: "shardCollection.start", ns: "db57.coll57", details: { shardKey: { _id: "hashed" }, collection: "db57.coll57", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.762-0400 m31101| 2015-07-09T14:15:23.761-0400 I INDEX [repl writer worker 12] build index on: db57.coll57 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db57.coll57" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.762-0400 m31101| 2015-07-09T14:15:23.761-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.765-0400 m31102| 2015-07-09T14:15:23.765-0400 I INDEX [repl writer worker 13] build index on: db57.coll57 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db57.coll57" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.766-0400 m31102| 2015-07-09T14:15:23.765-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.771-0400 m31101| 2015-07-09T14:15:23.771-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.774-0400 m31102| 2015-07-09T14:15:23.773-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.804-0400 m30999| 2015-07-09T14:15:23.804-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db57.coll57 using new epoch 559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.911-0400 m30999| 2015-07-09T14:15:23.911-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db57.coll57: 0ms sequenceNumber: 252 version: 1|1||559eba3bca4787b9985d1e50 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.968-0400 m30999| 2015-07-09T14:15:23.967-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db57.coll57: 0ms sequenceNumber: 253 version: 1|1||559eba3bca4787b9985d1e50 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.970-0400 m31100| 2015-07-09T14:15:23.969-0400 I SHARDING [conn182] remotely refreshing metadata for db57.coll57 with requested shard version 1|1||559eba3bca4787b9985d1e50, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.971-0400 m31100| 2015-07-09T14:15:23.971-0400 I SHARDING [conn182] collection db57.coll57 was previously unsharded, new metadata loaded with shard version 1|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.972-0400 m31100| 2015-07-09T14:15:23.971-0400 I SHARDING [conn182] collection version was loaded at version 1|1||559eba3bca4787b9985d1e50, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:23.972-0400 m30999| 2015-07-09T14:15:23.971-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:23.971-0400-559eba3bca4787b9985d1e51", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465723971), what: "shardCollection", ns: "db57.coll57", details: { version: "1|1||559eba3bca4787b9985d1e50" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.027-0400 m30999| 2015-07-09T14:15:24.027-0400 I SHARDING [conn1] distributed lock 'db57.coll57/bs-osx108-8:30999:1436464534:16807' unlocked. 
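For the next workload the harness sets up db57.coll57 sharded on a hashed _id, which is why an _id_hashed index is built on the primary shard and its secondaries and why exactly two initial chunks are created (numChunks: 2 in the shardCollection.start event). The setup is the standard two-step sequence; a minimal sketch, assuming a shell connected to a mongos:

    // Mirrors the "Enabling sharding for database [db57]" and
    // "CMD: shardcollection" lines above.
    sh.enableSharding("db57");
    sh.shardCollection("db57.coll57", { _id: "hashed" });
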
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.028-0400 m30999| 2015-07-09T14:15:24.028-0400 I SHARDING [conn1] moving chunk ns: db57.coll57 moving ( ns: db57.coll57, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.029-0400 m31100| 2015-07-09T14:15:24.029-0400 I SHARDING [conn187] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.030-0400 m31100| 2015-07-09T14:15:24.030-0400 I SHARDING [conn187] received moveChunk request: { moveChunk: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba3bca4787b9985d1e50') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.034-0400 m31100| 2015-07-09T14:15:24.034-0400 I SHARDING [conn187] distributed lock 'db57.coll57/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba3c792e00bb67274a43 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.034-0400 m31100| 2015-07-09T14:15:24.034-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:24.034-0400-559eba3c792e00bb67274a44", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", time: new Date(1436465724034), what: "moveChunk.start", ns: "db57.coll57", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.087-0400 m31100| 2015-07-09T14:15:24.087-0400 I SHARDING [conn187] remotely refreshing metadata for db57.coll57 based on current shard version 1|1||559eba3bca4787b9985d1e50, current metadata version is 1|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.089-0400 m31100| 2015-07-09T14:15:24.089-0400 I SHARDING [conn187] metadata of collection db57.coll57 already up to date (shard version : 1|1||559eba3bca4787b9985d1e50, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.089-0400 m31100| 2015-07-09T14:15:24.089-0400 I SHARDING [conn187] moveChunk request accepted at version 1|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.090-0400 m31100| 2015-07-09T14:15:24.089-0400 I SHARDING [conn187] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.090-0400 m31200| 2015-07-09T14:15:24.090-0400 I SHARDING [conn16] remotely refreshing metadata for db57.coll57, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.092-0400 m31200| 2015-07-09T14:15:24.091-0400 I SHARDING [conn16] collection db57.coll57 was previously unsharded, new metadata loaded with shard version 0|0||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.092-0400 m31200| 2015-07-09T14:15:24.091-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba3bca4787b9985d1e50, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.092-0400 m31200| 2015-07-09T14:15:24.092-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db57.coll57 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.094-0400 m31100| 2015-07-09T14:15:24.093-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.097-0400 m31100| 2015-07-09T14:15:24.097-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.102-0400 m31100| 2015-07-09T14:15:24.101-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.111-0400 m31200| 2015-07-09T14:15:24.110-0400 I INDEX [migrateThread] build index on: db57.coll57 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db57.coll57" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.111-0400 m31200| 2015-07-09T14:15:24.110-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.112-0400 m31100| 2015-07-09T14:15:24.111-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.122-0400 m31200| 2015-07-09T14:15:24.121-0400 I INDEX [migrateThread] build index on: db57.coll57 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db57.coll57" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.122-0400 m31200| 2015-07-09T14:15:24.122-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.128-0400 m31100| 2015-07-09T14:15:24.127-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.136-0400 m31200| 2015-07-09T14:15:24.136-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.138-0400 m31200| 2015-07-09T14:15:24.137-0400 I SHARDING [migrateThread] Deleter starting delete for: db57.coll57 from { _id: 0 } -> { _id: MaxKey }, with opId: 91805 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.138-0400 m31200| 2015-07-09T14:15:24.137-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db57.coll57 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.146-0400 m31201| 2015-07-09T14:15:24.145-0400 I INDEX [repl writer worker 14] build index on: db57.coll57 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db57.coll57" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.147-0400 m31201| 2015-07-09T14:15:24.145-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.147-0400 m31202| 2015-07-09T14:15:24.145-0400 I INDEX [repl writer worker 0] build index on: db57.coll57 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db57.coll57" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.148-0400 m31202| 2015-07-09T14:15:24.145-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.151-0400 m31201| 2015-07-09T14:15:24.150-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.154-0400 m31202| 2015-07-09T14:15:24.153-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.154-0400 m31200| 2015-07-09T14:15:24.154-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.154-0400 m31200| 2015-07-09T14:15:24.154-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db57.coll57' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.161-0400 m31100| 2015-07-09T14:15:24.161-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.162-0400 m31100| 2015-07-09T14:15:24.161-0400 I SHARDING [conn187] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.162-0400 m31100| 2015-07-09T14:15:24.162-0400 I SHARDING [conn187] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.162-0400 m31100| 2015-07-09T14:15:24.162-0400 I SHARDING [conn187] moveChunk setting version to: 2|0||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.167-0400 m31200| 2015-07-09T14:15:24.166-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db57.coll57' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.167-0400 m31200| 2015-07-09T14:15:24.166-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:24.166-0400-559eba3cd5a107a5b9c0db5e", server: "bs-osx108-8", clientAddr: 
"", time: new Date(1436465724166), what: "moveChunk.to", ns: "db57.coll57", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 44, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 1, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.220-0400 m31100| 2015-07-09T14:15:24.220-0400 I SHARDING [conn187] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.220-0400 m31100| 2015-07-09T14:15:24.220-0400 I SHARDING [conn187] moveChunk updating self version to: 2|1||559eba3bca4787b9985d1e50 through { _id: MinKey } -> { _id: 0 } for collection 'db57.coll57' [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.222-0400 m31100| 2015-07-09T14:15:24.221-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:24.221-0400-559eba3c792e00bb67274a45", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", time: new Date(1436465724221), what: "moveChunk.commit", ns: "db57.coll57", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.275-0400 m31100| 2015-07-09T14:15:24.275-0400 I SHARDING [conn187] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.276-0400 m31100| 2015-07-09T14:15:24.275-0400 I SHARDING [conn187] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.276-0400 m31100| 2015-07-09T14:15:24.275-0400 I SHARDING [conn187] Deleter starting delete for: db57.coll57 from { _id: 0 } -> { _id: MaxKey }, with opId: 143111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.276-0400 m31100| 2015-07-09T14:15:24.276-0400 I SHARDING [conn187] rangeDeleter deleted 0 documents for db57.coll57 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.276-0400 m31100| 2015-07-09T14:15:24.276-0400 I SHARDING [conn187] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.277-0400 m31100| 2015-07-09T14:15:24.277-0400 I SHARDING [conn187] distributed lock 'db57.coll57/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.278-0400 m31100| 2015-07-09T14:15:24.277-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:24.277-0400-559eba3c792e00bb67274a46", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", time: new Date(1436465724277), what: "moveChunk.from", ns: "db57.coll57", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 2, step 4 of 6: 69, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.332-0400 m31100| 2015-07-09T14:15:24.332-0400 I COMMAND [conn187] command db57.coll57 command: moveChunk { moveChunk: "db57.coll57", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba3bca4787b9985d1e50') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.334-0400 m30999| 2015-07-09T14:15:24.334-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db57.coll57: 0ms sequenceNumber: 254 version: 2|1||559eba3bca4787b9985d1e50 based on: 1|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.335-0400 m31100| 2015-07-09T14:15:24.335-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db57.coll57", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba3bca4787b9985d1e50') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.340-0400 m31100| 2015-07-09T14:15:24.339-0400 I SHARDING [conn187] distributed lock 'db57.coll57/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba3c792e00bb67274a47 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.340-0400 m31100| 2015-07-09T14:15:24.339-0400 I SHARDING [conn187] remotely refreshing metadata for db57.coll57 based on current shard version 2|0||559eba3bca4787b9985d1e50, current metadata version is 2|0||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.341-0400 m31100| 2015-07-09T14:15:24.341-0400 I SHARDING [conn187] updating metadata for db57.coll57 from shard version 2|0||559eba3bca4787b9985d1e50 to shard version 2|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.341-0400 m31100| 2015-07-09T14:15:24.341-0400 I SHARDING [conn187] collection version was loaded at version 2|1||559eba3bca4787b9985d1e50, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.341-0400 m31100| 2015-07-09T14:15:24.341-0400 I SHARDING [conn187] splitChunk accepted at version 2|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.343-0400 m31100| 2015-07-09T14:15:24.342-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:24.342-0400-559eba3c792e00bb67274a48", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", 
time: new Date(1436465724342), what: "split", ns: "db57.coll57", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba3bca4787b9985d1e50') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba3bca4787b9985d1e50') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.397-0400 m31100| 2015-07-09T14:15:24.397-0400 I SHARDING [conn187] distributed lock 'db57.coll57/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.400-0400 m30999| 2015-07-09T14:15:24.400-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db57.coll57: 0ms sequenceNumber: 255 version: 2|3||559eba3bca4787b9985d1e50 based on: 2|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.401-0400 m31200| 2015-07-09T14:15:24.400-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db57.coll57", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba3bca4787b9985d1e50') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.405-0400 m31200| 2015-07-09T14:15:24.405-0400 I SHARDING [conn18] distributed lock 'db57.coll57/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba3cd5a107a5b9c0db5f [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.406-0400 m31200| 2015-07-09T14:15:24.405-0400 I SHARDING [conn18] remotely refreshing metadata for db57.coll57 based on current shard version 0|0||559eba3bca4787b9985d1e50, current metadata version is 1|1||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.408-0400 m31200| 2015-07-09T14:15:24.407-0400 I SHARDING [conn18] updating metadata for db57.coll57 from shard version 0|0||559eba3bca4787b9985d1e50 to shard version 2|0||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.408-0400 m31200| 2015-07-09T14:15:24.408-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba3bca4787b9985d1e50, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.408-0400 m31200| 2015-07-09T14:15:24.408-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.410-0400 m31200| 2015-07-09T14:15:24.409-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:24.408-0400-559eba3cd5a107a5b9c0db60", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465724408), what: "split", ns: "db57.coll57", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba3bca4787b9985d1e50') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba3bca4787b9985d1e50') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.464-0400 m31200| 2015-07-09T14:15:24.463-0400 I SHARDING [conn18] distributed lock 'db57.coll57/bs-osx108-8:31200:1436464537:809424560' unlocked. 
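The sequence above is one full migration round for db57.coll57: mongos asks the donor (test-rs0) to move the [{ _id: 0 }, { _id: MaxKey }) chunk; the recipient (test-rs1) builds the _id and _id_hashed indexes, clears the range, clones the (zero) documents, and waits for replication to catch up; the donor then enters the critical section, commits version 2|0 through the config server, and range-deletes its own copy because waitForDelete was set. Each side afterwards splits its remaining chunk at the midpoint of its half of the hashed key space (the ±4611686018427387902 split keys, roughly ±2^62), ending at version 2|5. A minimal sketch of driving the same move by hand, mirroring the request logged above and assuming a shell connected to a mongos:

    // find selects the chunk containing this document; _waitForDelete mirrors
    // waitForDelete: true in the logged moveChunk request.
    db.adminCommand({
        moveChunk: "db57.coll57",
        find: { _id: 0 },
        to: "test-rs1",
        _waitForDelete: true
    });

    // Afterwards, config.chunks has one document per chunk with its owning shard.
    db.getSiblingDB("config").chunks.find({ ns: "db57.coll57" })
        .sort({ min: 1 })
        .forEach(printjson);
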
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.466-0400 m30999| 2015-07-09T14:15:24.466-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db57.coll57: 0ms sequenceNumber: 256 version: 2|5||559eba3bca4787b9985d1e50 based on: 2|3||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.468-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.520-0400 m30999| 2015-07-09T14:15:24.520-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63827 #362 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.527-0400 m30999| 2015-07-09T14:15:24.527-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63828 #363 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.527-0400 m30998| 2015-07-09T14:15:24.527-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63829 #362 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.530-0400 m30998| 2015-07-09T14:15:24.530-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63830 #363 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.530-0400 m30999| 2015-07-09T14:15:24.530-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63831 #364 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.542-0400 setting random seed: 7625712817534 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.542-0400 setting random seed: 6847600741311 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.542-0400 setting random seed: 8452026909217 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.543-0400 setting random seed: 3216329361312 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.543-0400 setting random seed: 9026503954082 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:24.546-0400 m30998| 2015-07-09T14:15:24.545-0400 I SHARDING [conn363] ChunkManager: time to load chunks for db57.coll57: 1ms sequenceNumber: 68 version: 2|5||559eba3bca4787b9985d1e50 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:34.574-0400 m30999| 2015-07-09T14:15:34.574-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:34.571-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:35.983-0400 m30998| 2015-07-09T14:15:35.983-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:35.981-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:36.899-0400 m31100| 2015-07-09T14:15:36.898-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:36.896-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:37.311-0400 m31200| 2015-07-09T14:15:37.311-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:15:37.310-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:39.272-0400 m30998| 2015-07-09T14:15:39.272-0400 I NETWORK [conn362] end connection 127.0.0.1:63829 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.324-0400 m30999| 2015-07-09T14:15:39.324-0400 I NETWORK [conn362] end connection 127.0.0.1:63827 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.361-0400 m30999| 2015-07-09T14:15:39.360-0400 I NETWORK [conn363] end connection 127.0.0.1:63828 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.368-0400 m30998| 2015-07-09T14:15:39.368-0400 I NETWORK [conn363] end connection 127.0.0.1:63830 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.375-0400 m30999| 2015-07-09T14:15:39.374-0400 I NETWORK [conn364] end connection 127.0.0.1:63831 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.404-0400 m30999| 2015-07-09T14:15:39.403-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.404-0400 m30999| 2015-07-09T14:15:39.404-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.404-0400 m31100| 2015-07-09T14:15:39.404-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.407-0400 m30999| 2015-07-09T14:15:39.407-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.407-0400 m30999| 2015-07-09T14:15:39.407-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.407-0400 m31100| 2015-07-09T14:15:39.407-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.408-0400 m31102| 2015-07-09T14:15:39.408-0400 I COMMAND [repl writer worker 4] CMD: drop db57.create_capped_collection_maxdocs0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.408-0400 m31101| 2015-07-09T14:15:39.408-0400 I COMMAND [repl writer worker 10] CMD: drop db57.create_capped_collection_maxdocs0_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.412-0400 m30999| 2015-07-09T14:15:39.412-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.412-0400 m30999| 2015-07-09T14:15:39.412-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.412-0400 m31100| 2015-07-09T14:15:39.412-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.413-0400 m31102| 2015-07-09T14:15:39.413-0400 I COMMAND [repl writer worker 13] CMD: drop db57.create_capped_collection_maxdocs0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.413-0400 m30999| 2015-07-09T14:15:39.413-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs0_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.413-0400 m30999| 2015-07-09T14:15:39.413-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.414-0400 m31101| 2015-07-09T14:15:39.413-0400 I COMMAND [repl writer worker 11] CMD: drop db57.create_capped_collection_maxdocs0_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.414-0400 m31100| 2015-07-09T14:15:39.414-0400 I COMMAND [conn57] CMD: drop 
db57.create_capped_collection_maxdocs0_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.416-0400 m30999| 2015-07-09T14:15:39.415-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs1_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.416-0400 m30999| 2015-07-09T14:15:39.415-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.416-0400 m31100| 2015-07-09T14:15:39.416-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs1_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.416-0400 m31101| 2015-07-09T14:15:39.416-0400 I COMMAND [repl writer worker 6] CMD: drop db57.create_capped_collection_maxdocs0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.417-0400 m31101| 2015-07-09T14:15:39.417-0400 I COMMAND [repl writer worker 9] CMD: drop db57.create_capped_collection_maxdocs0_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.417-0400 m30999| 2015-07-09T14:15:39.417-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.418-0400 m30999| 2015-07-09T14:15:39.417-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.418-0400 m31100| 2015-07-09T14:15:39.418-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.419-0400 m31101| 2015-07-09T14:15:39.419-0400 I COMMAND [repl writer worker 8] CMD: drop db57.create_capped_collection_maxdocs1_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.419-0400 m31102| 2015-07-09T14:15:39.419-0400 I COMMAND [repl writer worker 0] CMD: drop db57.create_capped_collection_maxdocs0_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.420-0400 m30999| 2015-07-09T14:15:39.420-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.420-0400 m30999| 2015-07-09T14:15:39.420-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.420-0400 m31100| 2015-07-09T14:15:39.420-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.421-0400 m31102| 2015-07-09T14:15:39.420-0400 I COMMAND [repl writer worker 1] CMD: drop db57.create_capped_collection_maxdocs0_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.421-0400 m30999| 2015-07-09T14:15:39.421-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs1_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.421-0400 m30999| 2015-07-09T14:15:39.421-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.422-0400 m31101| 2015-07-09T14:15:39.421-0400 I COMMAND [repl writer worker 1] CMD: drop db57.create_capped_collection_maxdocs1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.422-0400 m31100| 2015-07-09T14:15:39.422-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs1_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.423-0400 m31101| 2015-07-09T14:15:39.422-0400 I COMMAND [repl writer worker 12] CMD: drop db57.create_capped_collection_maxdocs1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.423-0400 m31102| 2015-07-09T14:15:39.423-0400 I COMMAND [repl writer worker 6] CMD: drop db57.create_capped_collection_maxdocs1_0 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.423-0400 m30999| 2015-07-09T14:15:39.423-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.423-0400 m30999| 2015-07-09T14:15:39.423-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.423-0400 m31100| 2015-07-09T14:15:39.423-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.424-0400 m31101| 2015-07-09T14:15:39.424-0400 I COMMAND [repl writer worker 2] CMD: drop db57.create_capped_collection_maxdocs1_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.425-0400 m31102| 2015-07-09T14:15:39.425-0400 I COMMAND [repl writer worker 9] CMD: drop db57.create_capped_collection_maxdocs1_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.425-0400 m30999| 2015-07-09T14:15:39.425-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs2_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.426-0400 m30999| 2015-07-09T14:15:39.425-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.426-0400 m31100| 2015-07-09T14:15:39.426-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs2_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.426-0400 m31102| 2015-07-09T14:15:39.426-0400 I COMMAND [repl writer worker 15] CMD: drop db57.create_capped_collection_maxdocs1_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.427-0400 m31101| 2015-07-09T14:15:39.427-0400 I COMMAND [repl writer worker 7] CMD: drop db57.create_capped_collection_maxdocs2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.428-0400 m31102| 2015-07-09T14:15:39.428-0400 I COMMAND [repl writer worker 7] CMD: drop db57.create_capped_collection_maxdocs1_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.428-0400 m30999| 2015-07-09T14:15:39.428-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.428-0400 m30999| 2015-07-09T14:15:39.428-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.428-0400 m31100| 2015-07-09T14:15:39.428-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.429-0400 m31102| 2015-07-09T14:15:39.429-0400 I COMMAND [repl writer worker 2] CMD: drop db57.create_capped_collection_maxdocs2_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.429-0400 m30999| 2015-07-09T14:15:39.429-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.430-0400 m30999| 2015-07-09T14:15:39.429-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.430-0400 m31101| 2015-07-09T14:15:39.429-0400 I COMMAND [repl writer worker 3] CMD: drop db57.create_capped_collection_maxdocs2_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.430-0400 m31100| 2015-07-09T14:15:39.430-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.431-0400 m31102| 2015-07-09T14:15:39.431-0400 I COMMAND [repl writer worker 3] CMD: drop db57.create_capped_collection_maxdocs2_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.431-0400 
m31101| 2015-07-09T14:15:39.431-0400 I COMMAND [repl writer worker 5] CMD: drop db57.create_capped_collection_maxdocs2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.432-0400 m30999| 2015-07-09T14:15:39.432-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.432-0400 m30999| 2015-07-09T14:15:39.432-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.433-0400 m31100| 2015-07-09T14:15:39.432-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.434-0400 m31102| 2015-07-09T14:15:39.433-0400 I COMMAND [repl writer worker 5] CMD: drop db57.create_capped_collection_maxdocs2_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.434-0400 m31101| 2015-07-09T14:15:39.433-0400 I COMMAND [repl writer worker 0] CMD: drop db57.create_capped_collection_maxdocs2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.435-0400 m30999| 2015-07-09T14:15:39.435-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.435-0400 m30999| 2015-07-09T14:15:39.435-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.435-0400 m31100| 2015-07-09T14:15:39.435-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.435-0400 m31102| 2015-07-09T14:15:39.435-0400 I COMMAND [repl writer worker 11] CMD: drop db57.create_capped_collection_maxdocs2_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.436-0400 m30999| 2015-07-09T14:15:39.436-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.437-0400 m30999| 2015-07-09T14:15:39.436-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.437-0400 m31100| 2015-07-09T14:15:39.437-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.437-0400 m31102| 2015-07-09T14:15:39.437-0400 I COMMAND [repl writer worker 14] CMD: drop db57.create_capped_collection_maxdocs3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.437-0400 m31101| 2015-07-09T14:15:39.437-0400 I COMMAND [repl writer worker 15] CMD: drop db57.create_capped_collection_maxdocs3_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.438-0400 m30999| 2015-07-09T14:15:39.438-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.439-0400 m30999| 2015-07-09T14:15:39.438-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.439-0400 m31100| 2015-07-09T14:15:39.438-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.439-0400 m31102| 2015-07-09T14:15:39.439-0400 I COMMAND [repl writer worker 8] CMD: drop db57.create_capped_collection_maxdocs3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.440-0400 m31101| 2015-07-09T14:15:39.440-0400 I COMMAND [repl writer worker 13] CMD: drop db57.create_capped_collection_maxdocs3_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.441-0400 m30999| 2015-07-09T14:15:39.440-0400 I COMMAND [conn1] DROP: 
db57.create_capped_collection_maxdocs4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.441-0400 m30999| 2015-07-09T14:15:39.440-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.441-0400 m31100| 2015-07-09T14:15:39.441-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.441-0400 m31102| 2015-07-09T14:15:39.441-0400 I COMMAND [repl writer worker 10] CMD: drop db57.create_capped_collection_maxdocs3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.443-0400 m31102| 2015-07-09T14:15:39.442-0400 I COMMAND [repl writer worker 12] CMD: drop db57.create_capped_collection_maxdocs3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.443-0400 m31101| 2015-07-09T14:15:39.443-0400 I COMMAND [repl writer worker 14] CMD: drop db57.create_capped_collection_maxdocs3_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.443-0400 m30999| 2015-07-09T14:15:39.443-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.444-0400 m30999| 2015-07-09T14:15:39.443-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.444-0400 m31100| 2015-07-09T14:15:39.443-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.444-0400 m31101| 2015-07-09T14:15:39.444-0400 I COMMAND [repl writer worker 4] CMD: drop db57.create_capped_collection_maxdocs3_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.445-0400 m30999| 2015-07-09T14:15:39.444-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.445-0400 m30999| 2015-07-09T14:15:39.444-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.445-0400 m31100| 2015-07-09T14:15:39.445-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.445-0400 m31102| 2015-07-09T14:15:39.445-0400 I COMMAND [repl writer worker 4] CMD: drop db57.create_capped_collection_maxdocs4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.446-0400 m31101| 2015-07-09T14:15:39.446-0400 I COMMAND [repl writer worker 10] CMD: drop db57.create_capped_collection_maxdocs4_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.447-0400 m31102| 2015-07-09T14:15:39.446-0400 I COMMAND [repl writer worker 13] CMD: drop db57.create_capped_collection_maxdocs4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.447-0400 m30999| 2015-07-09T14:15:39.447-0400 I COMMAND [conn1] DROP: db57.create_capped_collection_maxdocs4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.448-0400 m30999| 2015-07-09T14:15:39.447-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.448-0400 m31100| 2015-07-09T14:15:39.447-0400 I COMMAND [conn57] CMD: drop db57.create_capped_collection_maxdocs4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.448-0400 m31101| 2015-07-09T14:15:39.448-0400 I COMMAND [repl writer worker 11] CMD: drop db57.create_capped_collection_maxdocs4_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.449-0400 m31102| 2015-07-09T14:15:39.448-0400 I COMMAND [repl writer worker 0] CMD: drop db57.create_capped_collection_maxdocs4_2 
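For context before the completion banner below: the create_capped_collection_maxdocs workload exercises capped collections bounded by a document count as well as a byte size, and the "CMD: drop" lines above are its teardown removing those collections on the primary (m31100) and being replicated to both secondaries (m31101, m31102). A minimal shell sketch of the pattern, assuming illustrative limits not taken from the workload source:

    // Hypothetical example: cap a collection at 3 documents; inserting past
    // the limit ages out the oldest documents rather than failing.
    var testDB = db.getSiblingDB('db57');
    testDB.createCollection('create_capped_collection_maxdocs0_0',
                            {capped: true, size: 16384, max: 3});
    var coll = testDB.create_capped_collection_maxdocs0_0;
    for (var i = 0; i < 5; ++i) {
        assert.writeOK(coll.insert({_id: i}));
    }
    assert.eq(3, coll.find().itcount());  // only the 3 newest docs remain
    coll.drop();                          // mirrors the teardown drops above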
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.449-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.449-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.449-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.449-0400 jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js: Workload completed in 14934 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.449-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.450-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.450-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.450-0400 m30999| 2015-07-09T14:15:39.449-0400 I COMMAND [conn1] DROP: db57.coll57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.450-0400 m30999| 2015-07-09T14:15:39.449-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:39.449-0400-559eba4bca4787b9985d1e52", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465739449), what: "dropCollection.start", ns: "db57.coll57", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.451-0400 m31101| 2015-07-09T14:15:39.450-0400 I COMMAND [repl writer worker 6] CMD: drop db57.create_capped_collection_maxdocs4_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.451-0400 m31101| 2015-07-09T14:15:39.451-0400 I COMMAND [repl writer worker 9] CMD: drop db57.create_capped_collection_maxdocs4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.452-0400 m31102| 2015-07-09T14:15:39.451-0400 I COMMAND [repl writer worker 1] CMD: drop db57.create_capped_collection_maxdocs4_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.506-0400 m30999| 2015-07-09T14:15:39.506-0400 I SHARDING [conn1] distributed lock 'db57.coll57/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4bca4787b9985d1e53 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.507-0400 m31100| 2015-07-09T14:15:39.507-0400 I COMMAND [conn38] CMD: drop db57.coll57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.510-0400 m31200| 2015-07-09T14:15:39.509-0400 I COMMAND [conn63] CMD: drop db57.coll57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.511-0400 m31101| 2015-07-09T14:15:39.511-0400 I COMMAND [repl writer worker 8] CMD: drop db57.coll57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.511-0400 m31102| 2015-07-09T14:15:39.511-0400 I COMMAND [repl writer worker 6] CMD: drop db57.coll57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.513-0400 m31202| 2015-07-09T14:15:39.513-0400 I COMMAND [repl writer worker 7] CMD: drop db57.coll57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.514-0400 m31201| 2015-07-09T14:15:39.513-0400 I COMMAND [repl writer worker 2] CMD: drop db57.coll57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.565-0400 m31100| 2015-07-09T14:15:39.565-0400 I SHARDING [conn38] remotely refreshing metadata for db57.coll57 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba3bca4787b9985d1e50, current metadata version is 2|3||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.567-0400 m31100| 2015-07-09T14:15:39.566-0400 W SHARDING [conn38] no chunks found when reloading db57.coll57, previous version was 0|0||559eba3bca4787b9985d1e50, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.567-0400 m31100| 2015-07-09T14:15:39.566-0400 I SHARDING [conn38] dropping metadata for db57.coll57 at 
shard version 2|3||559eba3bca4787b9985d1e50, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.568-0400 m31200| 2015-07-09T14:15:39.568-0400 I SHARDING [conn63] remotely refreshing metadata for db57.coll57 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba3bca4787b9985d1e50, current metadata version is 2|5||559eba3bca4787b9985d1e50 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.570-0400 m31200| 2015-07-09T14:15:39.570-0400 W SHARDING [conn63] no chunks found when reloading db57.coll57, previous version was 0|0||559eba3bca4787b9985d1e50, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.571-0400 m31200| 2015-07-09T14:15:39.570-0400 I SHARDING [conn63] dropping metadata for db57.coll57 at shard version 2|5||559eba3bca4787b9985d1e50, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.572-0400 m30999| 2015-07-09T14:15:39.571-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:39.571-0400-559eba4bca4787b9985d1e54", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465739571), what: "dropCollection", ns: "db57.coll57", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.626-0400 m30999| 2015-07-09T14:15:39.626-0400 I SHARDING [conn1] distributed lock 'db57.coll57/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.682-0400 m30999| 2015-07-09T14:15:39.682-0400 I COMMAND [conn1] DROP DATABASE: db57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.683-0400 m30999| 2015-07-09T14:15:39.682-0400 I SHARDING [conn1] DBConfig::dropDatabase: db57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.683-0400 m30999| 2015-07-09T14:15:39.682-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:39.682-0400-559eba4bca4787b9985d1e55", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465739682), what: "dropDatabase.start", ns: "db57", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.789-0400 m30999| 2015-07-09T14:15:39.789-0400 I SHARDING [conn1] DBConfig::dropDatabase: db57 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.790-0400 m31100| 2015-07-09T14:15:39.790-0400 I COMMAND [conn157] dropDatabase db57 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.790-0400 m31100| 2015-07-09T14:15:39.790-0400 I COMMAND [conn157] dropDatabase db57 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.791-0400 m30999| 2015-07-09T14:15:39.790-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:39.790-0400-559eba4bca4787b9985d1e56", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465739790), what: "dropDatabase", ns: "db57", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.791-0400 m31101| 2015-07-09T14:15:39.790-0400 I COMMAND [repl writer worker 1] dropDatabase db57 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.791-0400 m31101| 2015-07-09T14:15:39.791-0400 I COMMAND [repl writer worker 1] dropDatabase db57 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.792-0400 m31102| 2015-07-09T14:15:39.791-0400 I COMMAND [repl writer worker 9] dropDatabase db57 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.792-0400 m31102| 2015-07-09T14:15:39.791-0400 I COMMAND [repl writer worker 9] 
dropDatabase db57 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.879-0400 m31100| 2015-07-09T14:15:39.879-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.883-0400 m31102| 2015-07-09T14:15:39.882-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.883-0400 m31101| 2015-07-09T14:15:39.882-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.918-0400 m31200| 2015-07-09T14:15:39.918-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.920-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 jstests/concurrency/fsm_workloads/update_simple_capped.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 m31202| 2015-07-09T14:15:39.921-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.921-0400 m31201| 2015-07-09T14:15:39.921-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.928-0400 m30999| 2015-07-09T14:15:39.927-0400 I SHARDING [conn1] distributed lock 'db58/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4bca4787b9985d1e57 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.932-0400 m30999| 2015-07-09T14:15:39.931-0400 I SHARDING [conn1] Placing [db58] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.932-0400 m30999| 2015-07-09T14:15:39.931-0400 I SHARDING [conn1] Enabling sharding for database [db58] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:39.986-0400 m30999| 2015-07-09T14:15:39.986-0400 I SHARDING [conn1] distributed lock 'db58/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.006-0400 m31100| 2015-07-09T14:15:40.005-0400 I INDEX [conn70] build index on: db58.coll58 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.006-0400 m31100| 2015-07-09T14:15:40.005-0400 I INDEX [conn70] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.012-0400 m31100| 2015-07-09T14:15:40.011-0400 I INDEX [conn70] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.013-0400 m30999| 2015-07-09T14:15:40.013-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db58.coll58", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.018-0400 m30999| 2015-07-09T14:15:40.017-0400 I SHARDING [conn1] distributed lock 'db58.coll58/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4cca4787b9985d1e58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.022-0400 m30999| 2015-07-09T14:15:40.021-0400 I SHARDING [conn1] enable sharding on: db58.coll58 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.023-0400 m30999| 2015-07-09T14:15:40.021-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.021-0400-559eba4cca4787b9985d1e59", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465740021), what: "shardCollection.start", ns: "db58.coll58", details: { shardKey: { _id: "hashed" }, collection: "db58.coll58", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.031-0400 m31102| 2015-07-09T14:15:40.031-0400 I INDEX [repl writer worker 5] build index on: db58.coll58 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.032-0400 m31102| 2015-07-09T14:15:40.031-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.032-0400 m31101| 2015-07-09T14:15:40.032-0400 I INDEX [repl writer worker 5] build index on: db58.coll58 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.033-0400 m31101| 2015-07-09T14:15:40.032-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.038-0400 m31101| 2015-07-09T14:15:40.038-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.041-0400 m31102| 2015-07-09T14:15:40.041-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.076-0400 m30999| 2015-07-09T14:15:40.075-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db58.coll58 using new epoch 559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.185-0400 m30999| 2015-07-09T14:15:40.185-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db58.coll58: 0ms sequenceNumber: 257 version: 1|1||559eba4cca4787b9985d1e5a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.240-0400 m30999| 2015-07-09T14:15:40.240-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db58.coll58: 0ms sequenceNumber: 258 version: 1|1||559eba4cca4787b9985d1e5a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.242-0400 m31100| 2015-07-09T14:15:40.242-0400 I SHARDING [conn57] remotely refreshing metadata for db58.coll58 with requested shard version 1|1||559eba4cca4787b9985d1e5a, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.244-0400 m31100| 2015-07-09T14:15:40.243-0400 I SHARDING [conn57] collection db58.coll58 was previously unsharded, new metadata loaded with shard version 1|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.244-0400 m31100| 2015-07-09T14:15:40.243-0400 I SHARDING [conn57] collection version was loaded at version 1|1||559eba4cca4787b9985d1e5a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.245-0400 m30999| 2015-07-09T14:15:40.244-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.244-0400-559eba4cca4787b9985d1e5b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465740244), what: "shardCollection", ns: "db58.coll58", details: { version: "1|1||559eba4cca4787b9985d1e5a" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.298-0400 m30999| 2015-07-09T14:15:40.298-0400 I SHARDING [conn1] distributed lock 'db58.coll58/bs-osx108-8:30999:1436464534:16807' unlocked. 
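The sequence just logged (distributed lock acquired, "Placing [db58] on: test-rs0", "Enabling sharding", the _id_hashed index build, the shardcollection command, creation of 2 initial chunks, lock unlocked) is the standard mongos setup for a hashed shard key. A minimal shell sketch of the equivalent client-side commands; both admin commands correspond to log lines above, while the assertions are illustrative:

    // Enable sharding on the database, then shard the collection on a
    // hashed _id key; mongos pre-splits into one chunk per shard (2 here).
    var admin = db.getSiblingDB('admin');
    assert.commandWorked(admin.runCommand({enableSharding: 'db58'}));
    assert.commandWorked(admin.runCommand({
        shardCollection: 'db58.coll58',
        key: {_id: 'hashed'}
    }));
    // The shell helpers sh.enableSharding('db58') and
    // sh.shardCollection('db58.coll58', {_id: 'hashed'}) issue the same commands.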
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.300-0400 m30999| 2015-07-09T14:15:40.299-0400 I SHARDING [conn1] moving chunk ns: db58.coll58 moving ( ns: db58.coll58, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.300-0400 m31100| 2015-07-09T14:15:40.299-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.301-0400 m31100| 2015-07-09T14:15:40.300-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba4cca4787b9985d1e5a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.305-0400 m31100| 2015-07-09T14:15:40.304-0400 I SHARDING [conn38] distributed lock 'db58.coll58/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba4c792e00bb67274a4a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.305-0400 m31100| 2015-07-09T14:15:40.304-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.304-0400-559eba4c792e00bb67274a4b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465740304), what: "moveChunk.start", ns: "db58.coll58", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.358-0400 m31100| 2015-07-09T14:15:40.358-0400 I SHARDING [conn38] remotely refreshing metadata for db58.coll58 based on current shard version 1|1||559eba4cca4787b9985d1e5a, current metadata version is 1|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.360-0400 m31100| 2015-07-09T14:15:40.360-0400 I SHARDING [conn38] metadata of collection db58.coll58 already up to date (shard version : 1|1||559eba4cca4787b9985d1e5a, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.360-0400 m31100| 2015-07-09T14:15:40.360-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.361-0400 m31100| 2015-07-09T14:15:40.360-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.361-0400 m31200| 2015-07-09T14:15:40.360-0400 I SHARDING [conn16] remotely refreshing metadata for db58.coll58, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.362-0400 m31200| 2015-07-09T14:15:40.362-0400 I SHARDING [conn16] collection db58.coll58 was previously unsharded, new metadata loaded with shard version 0|0||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.363-0400 m31200| 2015-07-09T14:15:40.362-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba4cca4787b9985d1e5a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.363-0400 m31200| 2015-07-09T14:15:40.362-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db58.coll58 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.365-0400 m31100| 2015-07-09T14:15:40.364-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.368-0400 m31100| 2015-07-09T14:15:40.368-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.373-0400 m31100| 2015-07-09T14:15:40.373-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.381-0400 m31200| 2015-07-09T14:15:40.381-0400 I INDEX [migrateThread] build index on: db58.coll58 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.382-0400 m31200| 2015-07-09T14:15:40.381-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.383-0400 m31100| 2015-07-09T14:15:40.382-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.390-0400 m31200| 2015-07-09T14:15:40.389-0400 I INDEX [migrateThread] build index on: db58.coll58 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.390-0400 m31200| 2015-07-09T14:15:40.389-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.400-0400 m31100| 2015-07-09T14:15:40.399-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.402-0400 m31200| 2015-07-09T14:15:40.401-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.402-0400 m31200| 2015-07-09T14:15:40.402-0400 I SHARDING [migrateThread] Deleter starting delete for: db58.coll58 from { _id: 0 } -> { _id: MaxKey }, with opId: 91898 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.402-0400 m31200| 2015-07-09T14:15:40.402-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db58.coll58 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.409-0400 m31201| 2015-07-09T14:15:40.409-0400 I INDEX [repl writer worker 4] build index on: db58.coll58 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.410-0400 m31201| 2015-07-09T14:15:40.409-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.415-0400 m31202| 2015-07-09T14:15:40.415-0400 I INDEX [repl writer worker 13] build index on: db58.coll58 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.416-0400 m31202| 2015-07-09T14:15:40.415-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.418-0400 m31201| 2015-07-09T14:15:40.417-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.422-0400 m31200| 2015-07-09T14:15:40.422-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.422-0400 m31200| 2015-07-09T14:15:40.422-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db58.coll58' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.423-0400 m31202| 2015-07-09T14:15:40.422-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.433-0400 m31100| 2015-07-09T14:15:40.433-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.434-0400 m31100| 2015-07-09T14:15:40.433-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.434-0400 m31100| 2015-07-09T14:15:40.433-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.434-0400 m31100| 2015-07-09T14:15:40.433-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.435-0400 m31200| 2015-07-09T14:15:40.434-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db58.coll58' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.435-0400 m31200| 2015-07-09T14:15:40.434-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.434-0400-559eba4cd5a107a5b9c0db61", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465740434), what: "moveChunk.to", ns: "db58.coll58", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 39, step 2 of 5: 17, step 3 of 5: 0, step 4 of 5: 1, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.489-0400 m31100| 2015-07-09T14:15:40.489-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.490-0400 m31100| 2015-07-09T14:15:40.489-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559eba4cca4787b9985d1e5a through { _id: MinKey } -> { _id: 0 } for collection 'db58.coll58' [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.493-0400 m31100| 2015-07-09T14:15:40.493-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.493-0400-559eba4c792e00bb67274a4c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465740493), what: "moveChunk.commit", ns: "db58.coll58", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.547-0400 m31100| 2015-07-09T14:15:40.547-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.547-0400 m31100| 2015-07-09T14:15:40.547-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.547-0400 m31100| 2015-07-09T14:15:40.547-0400 I SHARDING [conn38] Deleter starting delete for: db58.coll58 from { _id: 0 } -> { _id: MaxKey }, with opId: 182201 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:40.547-0400 m31100| 2015-07-09T14:15:40.547-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db58.coll58 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.548-0400 m31100| 2015-07-09T14:15:40.547-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.548-0400 m31100| 2015-07-09T14:15:40.548-0400 I SHARDING [conn38] distributed lock 'db58.coll58/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.549-0400 m31100| 2015-07-09T14:15:40.548-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.548-0400-559eba4c792e00bb67274a4d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465740548), what: "moveChunk.from", ns: "db58.coll58", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 2, step 4 of 6: 70, step 5 of 6: 113, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.602-0400 m31100| 2015-07-09T14:15:40.601-0400 I COMMAND [conn38] command db58.coll58 command: moveChunk { moveChunk: "db58.coll58", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba4cca4787b9985d1e5a') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 301ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.604-0400 m30999| 2015-07-09T14:15:40.604-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db58.coll58: 1ms sequenceNumber: 259 version: 2|1||559eba4cca4787b9985d1e5a based on: 1|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.606-0400 m31100| 2015-07-09T14:15:40.605-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db58.coll58", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4cca4787b9985d1e5a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.610-0400 m31100| 2015-07-09T14:15:40.609-0400 I SHARDING [conn38] distributed lock 'db58.coll58/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba4c792e00bb67274a4e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.610-0400 m31100| 2015-07-09T14:15:40.609-0400 I SHARDING [conn38] remotely refreshing metadata for db58.coll58 based on current shard version 2|0||559eba4cca4787b9985d1e5a, current metadata version is 2|0||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.611-0400 m31100| 2015-07-09T14:15:40.611-0400 I SHARDING [conn38] updating metadata for db58.coll58 from shard version 2|0||559eba4cca4787b9985d1e5a to shard version 2|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.611-0400 m31100| 2015-07-09T14:15:40.611-0400 I 
SHARDING [conn38] collection version was loaded at version 2|1||559eba4cca4787b9985d1e5a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.612-0400 m31100| 2015-07-09T14:15:40.611-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.613-0400 m31100| 2015-07-09T14:15:40.612-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.612-0400-559eba4c792e00bb67274a4f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465740612), what: "split", ns: "db58.coll58", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba4cca4787b9985d1e5a') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba4cca4787b9985d1e5a') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.668-0400 m31100| 2015-07-09T14:15:40.668-0400 I SHARDING [conn38] distributed lock 'db58.coll58/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.670-0400 m30999| 2015-07-09T14:15:40.670-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db58.coll58: 0ms sequenceNumber: 260 version: 2|3||559eba4cca4787b9985d1e5a based on: 2|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.671-0400 m31200| 2015-07-09T14:15:40.670-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db58.coll58", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4cca4787b9985d1e5a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.674-0400 m31200| 2015-07-09T14:15:40.673-0400 I SHARDING [conn63] distributed lock 'db58.coll58/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba4cd5a107a5b9c0db62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.674-0400 m31200| 2015-07-09T14:15:40.674-0400 I SHARDING [conn63] remotely refreshing metadata for db58.coll58 based on current shard version 0|0||559eba4cca4787b9985d1e5a, current metadata version is 1|1||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.675-0400 m31200| 2015-07-09T14:15:40.675-0400 I SHARDING [conn63] updating metadata for db58.coll58 from shard version 0|0||559eba4cca4787b9985d1e5a to shard version 2|0||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.675-0400 m31200| 2015-07-09T14:15:40.675-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eba4cca4787b9985d1e5a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.676-0400 m31200| 2015-07-09T14:15:40.675-0400 I SHARDING [conn63] splitChunk accepted at version 2|0||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.677-0400 m31200| 2015-07-09T14:15:40.676-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.676-0400-559eba4cd5a107a5b9c0db63", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436465740676), what: "split", ns: "db58.coll58", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eba4cca4787b9985d1e5a') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba4cca4787b9985d1e5a') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.731-0400 m31200| 2015-07-09T14:15:40.731-0400 I SHARDING [conn63] distributed lock 'db58.coll58/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.734-0400 m30999| 2015-07-09T14:15:40.734-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db58.coll58: 1ms sequenceNumber: 261 version: 2|5||559eba4cca4787b9985d1e5a based on: 2|3||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.736-0400 m30999| 2015-07-09T14:15:40.736-0400 I COMMAND [conn1] DROP: db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.737-0400 m30999| 2015-07-09T14:15:40.736-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.736-0400-559eba4cca4787b9985d1e5c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465740736), what: "dropCollection.start", ns: "db58.coll58", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.793-0400 m30999| 2015-07-09T14:15:40.793-0400 I SHARDING [conn1] distributed lock 'db58.coll58/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4cca4787b9985d1e5d [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.794-0400 m31100| 2015-07-09T14:15:40.794-0400 I COMMAND [conn38] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.797-0400 m31200| 2015-07-09T14:15:40.797-0400 I COMMAND [conn63] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.798-0400 m31102| 2015-07-09T14:15:40.798-0400 I COMMAND [repl writer worker 11] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.798-0400 m31101| 2015-07-09T14:15:40.798-0400 I COMMAND [repl writer worker 0] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.801-0400 m31202| 2015-07-09T14:15:40.800-0400 I COMMAND [repl writer worker 10] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.801-0400 m31201| 2015-07-09T14:15:40.801-0400 I COMMAND [repl writer worker 5] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.853-0400 m31100| 2015-07-09T14:15:40.852-0400 I SHARDING [conn38] remotely refreshing metadata for db58.coll58 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba4cca4787b9985d1e5a, current metadata version is 2|3||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.854-0400 m31100| 2015-07-09T14:15:40.854-0400 W SHARDING [conn38] no chunks found when reloading db58.coll58, previous version was 0|0||559eba4cca4787b9985d1e5a, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.854-0400 m31100| 2015-07-09T14:15:40.854-0400 I SHARDING [conn38] dropping metadata for db58.coll58 at shard version 2|3||559eba4cca4787b9985d1e5a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.856-0400 m31200| 2015-07-09T14:15:40.855-0400 I SHARDING [conn63] remotely refreshing metadata for db58.coll58 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba4cca4787b9985d1e5a, current metadata version is 2|5||559eba4cca4787b9985d1e5a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.857-0400 m31200| 
2015-07-09T14:15:40.856-0400 W SHARDING [conn63] no chunks found when reloading db58.coll58, previous version was 0|0||559eba4cca4787b9985d1e5a, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.857-0400 m31200| 2015-07-09T14:15:40.856-0400 I SHARDING [conn63] dropping metadata for db58.coll58 at shard version 2|5||559eba4cca4787b9985d1e5a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.858-0400 m30999| 2015-07-09T14:15:40.858-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:40.858-0400-559eba4cca4787b9985d1e5e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465740858), what: "dropCollection", ns: "db58.coll58", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.912-0400 m30999| 2015-07-09T14:15:40.911-0400 I SHARDING [conn1] distributed lock 'db58.coll58/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.968-0400 m30999| 2015-07-09T14:15:40.967-0400 I SHARDING [conn1] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.968-0400 m30999| 2015-07-09T14:15:40.967-0400 I SHARDING [conn1] retrying command: { create: "coll58", capped: true, size: 16384.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.968-0400 m31100| 2015-07-09T14:15:40.967-0400 I NETWORK [conn57] end connection 127.0.0.1:62747 (112 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:40.999-0400 m31100| 2015-07-09T14:15:40.999-0400 I INDEX [conn180] build index on: db58.coll58 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.000-0400 m31100| 2015-07-09T14:15:40.999-0400 I INDEX [conn180] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.006-0400 m31100| 2015-07-09T14:15:41.006-0400 I INDEX [conn180] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.012-0400 m31101| 2015-07-09T14:15:41.011-0400 I INDEX [repl writer worker 13] build index on: db58.coll58 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.012-0400 m31101| 2015-07-09T14:15:41.011-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.013-0400 m31102| 2015-07-09T14:15:41.011-0400 I INDEX [repl writer worker 8] build index on: db58.coll58 properties: { v: 1, key: { value: 1.0 }, name: "value_1", ns: "db58.coll58" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.013-0400 m31102| 2015-07-09T14:15:41.011-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.015-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.019-0400 m31102| 2015-07-09T14:15:41.019-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.099-0400 m31101| 2015-07-09T14:15:41.056-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.234-0400 m30999| 2015-07-09T14:15:41.234-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63835 #365 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.244-0400 m30999| 2015-07-09T14:15:41.244-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63836 #366 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.247-0400 m30998| 2015-07-09T14:15:41.246-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63837 #364 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.257-0400 m30998| 2015-07-09T14:15:41.256-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63838 #365 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.267-0400 m30998| 2015-07-09T14:15:41.267-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63839 #366 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.271-0400 m30999| 2015-07-09T14:15:41.271-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63843 #367 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.277-0400 m30998| 2015-07-09T14:15:41.277-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63840 #367 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.281-0400 m30999| 2015-07-09T14:15:41.281-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63844 #368 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.287-0400 m30998| 2015-07-09T14:15:41.287-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63841 #368 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.288-0400 m30998| 2015-07-09T14:15:41.287-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63842 #369 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.304-0400 m30999| 2015-07-09T14:15:41.303-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63845 #369 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.304-0400 m30998| 2015-07-09T14:15:41.304-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63848 #370 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.306-0400 m30999| 2015-07-09T14:15:41.305-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63846 #370 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.312-0400 m30999| 2015-07-09T14:15:41.312-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63847 #371 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.312-0400 m30999| 2015-07-09T14:15:41.312-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63851 #372 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.314-0400 m30998| 2015-07-09T14:15:41.314-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63849 #371 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.316-0400 m30998| 2015-07-09T14:15:41.315-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63850 #372 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.316-0400 m30999| 2015-07-09T14:15:41.316-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63852 #373 (10 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.322-0400 m30998| 2015-07-09T14:15:41.322-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63854 #373 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.323-0400 m30999| 2015-07-09T14:15:41.323-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63853 #374 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.330-0400 setting random seed: 6700544920749 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.330-0400 setting random seed: 7563610146753 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.330-0400 setting random seed: 5355383986607 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.331-0400 setting random seed: 7938704816624 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.333-0400 setting random seed: 538019170053 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.333-0400 setting random seed: 9098350023850 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.334-0400 setting random seed: 1595795680768 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.335-0400 setting random seed: 5968922064639 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.339-0400 setting random seed: 5811915136873 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.340-0400 setting random seed: 9673187108710 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.340-0400 setting random seed: 6219760184176 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.343-0400 setting random seed: 9808281664736 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.348-0400 setting random seed: 7255811798386 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.349-0400 setting random seed: 7790015987120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.352-0400 setting random seed: 6798756499774 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.352-0400 setting random seed: 8428952260874 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.352-0400 setting random seed: 857750922441 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.353-0400 setting random seed: 5509464126080 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.354-0400 setting random seed: 2488195295445 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.354-0400 setting random seed: 5400714478455 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.551-0400 m30999| 2015-07-09T14:15:41.550-0400 I NETWORK [conn369] end connection 127.0.0.1:63845 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.578-0400 m30998| 2015-07-09T14:15:41.578-0400 I NETWORK [conn369] end connection 127.0.0.1:63842 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.582-0400 m30999| 2015-07-09T14:15:41.581-0400 I NETWORK [conn371] end connection 127.0.0.1:63847 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.595-0400 m30998| 2015-07-09T14:15:41.594-0400 I NETWORK [conn372] end connection 127.0.0.1:63850 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.610-0400 m30998| 2015-07-09T14:15:41.610-0400 I NETWORK [conn370] end connection 127.0.0.1:63848 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.623-0400 m30999| 2015-07-09T14:15:41.622-0400 I NETWORK [conn365] end connection 127.0.0.1:63835 (8 connections now open) 
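The twenty "setting random seed" lines above are printed by the shell's Random.setRandomSeed() helper: each FSM worker thread seeds its own PRNG independently so a failing workload run can be replayed deterministically. A minimal sketch of that pattern in the mongo shell (seed value copied from the log for illustration):

    // each worker seeds its own generator; the helper logs "setting random seed: <n>"
    Random.setRandomSeed(6700544920749);
    var r = Random.rand();      // uniform double in [0, 1)
    var k = Random.randInt(20); // integer in [0, 20)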
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.623-0400 m30999| 2015-07-09T14:15:41.622-0400 I NETWORK [conn366] end connection 127.0.0.1:63836 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.623-0400 m30998| 2015-07-09T14:15:41.622-0400 I NETWORK [conn367] end connection 127.0.0.1:63840 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.624-0400 m30999| 2015-07-09T14:15:41.624-0400 I NETWORK [conn368] end connection 127.0.0.1:63844 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.644-0400 m30998| 2015-07-09T14:15:41.635-0400 I NETWORK [conn365] end connection 127.0.0.1:63838 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.645-0400 m30998| 2015-07-09T14:15:41.638-0400 I NETWORK [conn371] end connection 127.0.0.1:63849 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.651-0400 m30998| 2015-07-09T14:15:41.649-0400 I NETWORK [conn368] end connection 127.0.0.1:63841 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.654-0400 m30998| 2015-07-09T14:15:41.654-0400 I NETWORK [conn364] end connection 127.0.0.1:63837 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.670-0400 m30999| 2015-07-09T14:15:41.669-0400 I NETWORK [conn372] end connection 127.0.0.1:63851 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.679-0400 m30999| 2015-07-09T14:15:41.679-0400 I NETWORK [conn374] end connection 127.0.0.1:63853 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.686-0400 m30999| 2015-07-09T14:15:41.686-0400 I NETWORK [conn367] end connection 127.0.0.1:63843 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.688-0400 m30999| 2015-07-09T14:15:41.686-0400 I NETWORK [conn370] end connection 127.0.0.1:63846 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.689-0400 m30998| 2015-07-09T14:15:41.689-0400 I NETWORK [conn373] end connection 127.0.0.1:63854 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.724-0400 m30998| 2015-07-09T14:15:41.723-0400 I NETWORK [conn366] end connection 127.0.0.1:63839 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.729-0400 m30999| 2015-07-09T14:15:41.728-0400 I NETWORK [conn373] end connection 127.0.0.1:63852 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.750-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.751-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.751-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.751-0400 jstests/concurrency/fsm_workloads/update_simple_capped.js: Workload completed in 735 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.751-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.751-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.751-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.751-0400 m30999| 2015-07-09T14:15:41.751-0400 I COMMAND [conn1] DROP: db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.752-0400 m30999| 2015-07-09T14:15:41.751-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.752-0400 m31100| 2015-07-09T14:15:41.751-0400 I COMMAND [conn180] CMD: drop db58.coll58 
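The update_simple_capped workload that just completed set up the collection logged earlier: mongos discarded a stale sharded connection, retried the capped create against the primary shard, and the value_1 index build replicated to both secondaries. A rough shell equivalent of that setup, assuming `db` points at db58 through a mongos:

    // capped collections are not sharded here; the create passes through
    // to the primary shard, and mongos retries it on a fresh connection
    db.createCollection("coll58", { capped: true, size: 16384 });
    db.coll58.createIndex({ value: 1 }); // replicated to the secondaries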
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.756-0400 m30999| 2015-07-09T14:15:41.756-0400 I COMMAND [conn1] DROP DATABASE: db58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.756-0400 m31101| 2015-07-09T14:15:41.756-0400 I COMMAND [repl writer worker 1] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.757-0400 m30999| 2015-07-09T14:15:41.756-0400 I SHARDING [conn1] DBConfig::dropDatabase: db58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.757-0400 m31102| 2015-07-09T14:15:41.756-0400 I COMMAND [repl writer worker 4] CMD: drop db58.coll58 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.757-0400 m30999| 2015-07-09T14:15:41.756-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:41.756-0400-559eba4dca4787b9985d1e5f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465741756), what: "dropDatabase.start", ns: "db58", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.862-0400 m30999| 2015-07-09T14:15:41.861-0400 I SHARDING [conn1] DBConfig::dropDatabase: db58 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.862-0400 m31100| 2015-07-09T14:15:41.862-0400 I COMMAND [conn157] dropDatabase db58 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.862-0400 m31100| 2015-07-09T14:15:41.862-0400 I COMMAND [conn157] dropDatabase db58 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.863-0400 m30999| 2015-07-09T14:15:41.862-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:41.862-0400-559eba4dca4787b9985d1e60", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465741862), what: "dropDatabase", ns: "db58", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.863-0400 m31102| 2015-07-09T14:15:41.863-0400 I COMMAND [repl writer worker 0] dropDatabase db58 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.863-0400 m31102| 2015-07-09T14:15:41.863-0400 I COMMAND [repl writer worker 0] dropDatabase db58 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.863-0400 m31101| 2015-07-09T14:15:41.863-0400 I COMMAND [repl writer worker 3] dropDatabase db58 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.864-0400 m31101| 2015-07-09T14:15:41.863-0400 I COMMAND [repl writer worker 3] dropDatabase db58 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.949-0400 m31100| 2015-07-09T14:15:41.949-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.953-0400 m31101| 2015-07-09T14:15:41.953-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.953-0400 m31102| 2015-07-09T14:15:41.953-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.980-0400 m31200| 2015-07-09T14:15:41.980-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.982-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.982-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.982-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.983-0400 jstests/concurrency/fsm_workloads/indexed_insert_1char.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.983-0400 ---- [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:41.983-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.983-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.984-0400 m31201| 2015-07-09T14:15:41.984-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.984-0400 m31202| 2015-07-09T14:15:41.984-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.990-0400 m30999| 2015-07-09T14:15:41.990-0400 I SHARDING [conn1] distributed lock 'db59/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4dca4787b9985d1e61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.993-0400 m30999| 2015-07-09T14:15:41.993-0400 I SHARDING [conn1] Placing [db59] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:41.994-0400 m30999| 2015-07-09T14:15:41.993-0400 I SHARDING [conn1] Enabling sharding for database [db59] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.048-0400 m30999| 2015-07-09T14:15:42.047-0400 I SHARDING [conn1] distributed lock 'db59/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.069-0400 m31100| 2015-07-09T14:15:42.069-0400 I INDEX [conn68] build index on: db59.coll59 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db59.coll59" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.070-0400 m31100| 2015-07-09T14:15:42.069-0400 I INDEX [conn68] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.077-0400 m31100| 2015-07-09T14:15:42.076-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.078-0400 m30999| 2015-07-09T14:15:42.078-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db59.coll59", key: { indexed_insert_1char: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.084-0400 m30999| 2015-07-09T14:15:42.083-0400 I SHARDING [conn1] distributed lock 'db59.coll59/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4eca4787b9985d1e62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.085-0400 m30999| 2015-07-09T14:15:42.085-0400 I SHARDING [conn1] enable sharding on: db59.coll59 with shard key: { indexed_insert_1char: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.086-0400 m30999| 2015-07-09T14:15:42.085-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:42.085-0400-559eba4eca4787b9985d1e63", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465742085), what: "shardCollection.start", ns: "db59.coll59", details: { shardKey: { indexed_insert_1char: 1.0 }, collection: "db59.coll59", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.090-0400 m31101| 2015-07-09T14:15:42.090-0400 I INDEX [repl writer worker 15] build index on: db59.coll59 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db59.coll59" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.091-0400 m31101| 2015-07-09T14:15:42.090-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.093-0400 m31102| 2015-07-09T14:15:42.092-0400 I INDEX [repl writer worker 1] build index on: 
db59.coll59 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db59.coll59" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.094-0400 m31102| 2015-07-09T14:15:42.092-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.099-0400 m31102| 2015-07-09T14:15:42.098-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.101-0400 m31101| 2015-07-09T14:15:42.100-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.140-0400 m30999| 2015-07-09T14:15:42.139-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db59.coll59 using new epoch 559eba4eca4787b9985d1e64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.194-0400 m30999| 2015-07-09T14:15:42.193-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db59.coll59: 0ms sequenceNumber: 262 version: 1|0||559eba4eca4787b9985d1e64 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.250-0400 m30999| 2015-07-09T14:15:42.249-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db59.coll59: 0ms sequenceNumber: 263 version: 1|0||559eba4eca4787b9985d1e64 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.252-0400 m31100| 2015-07-09T14:15:42.251-0400 I SHARDING [conn180] remotely refreshing metadata for db59.coll59 with requested shard version 1|0||559eba4eca4787b9985d1e64, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.253-0400 m31100| 2015-07-09T14:15:42.253-0400 I SHARDING [conn180] collection db59.coll59 was previously unsharded, new metadata loaded with shard version 1|0||559eba4eca4787b9985d1e64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.253-0400 m31100| 2015-07-09T14:15:42.253-0400 I SHARDING [conn180] collection version was loaded at version 1|0||559eba4eca4787b9985d1e64, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.254-0400 m30999| 2015-07-09T14:15:42.253-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:42.253-0400-559eba4eca4787b9985d1e65", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465742253), what: "shardCollection", ns: "db59.coll59", details: { version: "1|0||559eba4eca4787b9985d1e64" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.308-0400 m30999| 2015-07-09T14:15:42.307-0400 I SHARDING [conn1] distributed lock 'db59.coll59/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.329-0400 m31200| 2015-07-09T14:15:42.328-0400 I INDEX [conn30] build index on: db59.coll59 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db59.coll59" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.330-0400 m31200| 2015-07-09T14:15:42.328-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.338-0400 m31200| 2015-07-09T14:15:42.337-0400 I INDEX [conn30] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.339-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.483-0400 m31201| 2015-07-09T14:15:42.468-0400 I INDEX [repl writer worker 8] build index on: db59.coll59 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db59.coll59" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.483-0400 m31201| 2015-07-09T14:15:42.468-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.495-0400 m31202| 2015-07-09T14:15:42.484-0400 I INDEX [repl writer worker 2] build index on: db59.coll59 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db59.coll59" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.496-0400 m31202| 2015-07-09T14:15:42.484-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.516-0400 m30998| 2015-07-09T14:15:42.515-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63855 #374 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.525-0400 m31201| 2015-07-09T14:15:42.525-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.532-0400 m31202| 2015-07-09T14:15:42.532-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.545-0400 m30998| 2015-07-09T14:15:42.544-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63856 #375 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.545-0400 m30999| 2015-07-09T14:15:42.545-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63858 #375 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.554-0400 m30999| 2015-07-09T14:15:42.554-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63860 #376 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.555-0400 m30998| 2015-07-09T14:15:42.554-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63857 #376 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.559-0400 m30998| 2015-07-09T14:15:42.559-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63859 #377 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.561-0400 m30999| 2015-07-09T14:15:42.560-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63861 #377 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.601-0400 m30999| 2015-07-09T14:15:42.600-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63862 #378 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.602-0400 m30998| 2015-07-09T14:15:42.601-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63863 #378 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.602-0400 m30999| 2015-07-09T14:15:42.601-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63864 #379 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.602-0400 m30998| 2015-07-09T14:15:42.602-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63866 #379 (7 connections now open) 
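The sequence above is the standard FSM per-workload setup: mongos takes the distributed lock on db59, places the database on test-rs0, builds the shard-key index, and shards the collection with a single initial chunk at version 1|0. The equivalent commands, run against a mongos, would look roughly like:

    sh.enableSharding("db59");
    db.getSiblingDB("db59").coll59.createIndex({ indexed_insert_1char: 1 });
    sh.shardCollection("db59.coll59", { indexed_insert_1char: 1 });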
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.602-0400 m30999| 2015-07-09T14:15:42.602-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63865 #380 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.604-0400 m30998| 2015-07-09T14:15:42.603-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63868 #380 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.610-0400 m30999| 2015-07-09T14:15:42.610-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63867 #381 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.611-0400 m30999| 2015-07-09T14:15:42.611-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63869 #382 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.611-0400 m30999| 2015-07-09T14:15:42.611-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63872 #383 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.611-0400 m30998| 2015-07-09T14:15:42.611-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63870 #381 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.612-0400 m30999| 2015-07-09T14:15:42.612-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63873 #384 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.614-0400 m30998| 2015-07-09T14:15:42.614-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63871 #382 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.623-0400 m30998| 2015-07-09T14:15:42.622-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63874 #383 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.630-0400 setting random seed: 7921959999948 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.630-0400 setting random seed: 4381781136617 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.630-0400 setting random seed: 4512599790468 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.630-0400 setting random seed: 1746124844066 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.631-0400 setting random seed: 6784089701250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.632-0400 setting random seed: 4595597414299 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.632-0400 setting random seed: 8205778100527 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.633-0400 setting random seed: 2040692684240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.633-0400 setting random seed: 7909993808716 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.633-0400 setting random seed: 7120267893187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.633-0400 setting random seed: 7011485728435 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.635-0400 setting random seed: 3264713725075 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.635-0400 setting random seed: 342536531388 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.635-0400 setting random seed: 195662006735 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.636-0400 setting random seed: 3266005003824 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.637-0400 m30998| 2015-07-09T14:15:42.637-0400 I SHARDING [conn375] ChunkManager: time to load chunks for db59.coll59: 0ms sequenceNumber: 69 version: 1|0||559eba4eca4787b9985d1e64 
based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.637-0400 setting random seed: 2833746895194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.639-0400 setting random seed: 5805905731394 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.642-0400 setting random seed: 6058535482734 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.663-0400 setting random seed: 43602888472 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.664-0400 setting random seed: 9857672988437 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.673-0400 m31100| 2015-07-09T14:15:42.673-0400 I SHARDING [conn38] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.679-0400 m31100| 2015-07-09T14:15:42.679-0400 I SHARDING [conn36] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.693-0400 m31100| 2015-07-09T14:15:42.692-0400 I SHARDING [conn36] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.696-0400 m31100| 2015-07-09T14:15:42.695-0400 I SHARDING [conn40] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.697-0400 m31100| 2015-07-09T14:15:42.696-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.699-0400 m31100| 2015-07-09T14:15:42.697-0400 I SHARDING [conn38] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.699-0400 m31100| 2015-07-09T14:15:42.697-0400 I SHARDING [conn34] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.700-0400 m31100| 2015-07-09T14:15:42.698-0400 I SHARDING [conn132] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.700-0400 m31100| 2015-07-09T14:15:42.698-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "2" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.701-0400 m31100| 2015-07-09T14:15:42.698-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" 
}, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.704-0400 m31100| 2015-07-09T14:15:42.698-0400 I SHARDING [conn39] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.704-0400 m31100| 2015-07-09T14:15:42.698-0400 I SHARDING [conn35] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.704-0400 m31100| 2015-07-09T14:15:42.699-0400 I SHARDING [conn38] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.705-0400 m31100| 2015-07-09T14:15:42.700-0400 I SHARDING [conn15] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.708-0400 m31100| 2015-07-09T14:15:42.701-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.709-0400 m31100| 2015-07-09T14:15:42.702-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.712-0400 m31100| 2015-07-09T14:15:42.702-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.713-0400 m31100| 2015-07-09T14:15:42.702-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.713-0400 m31100| 2015-07-09T14:15:42.703-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" 
}, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.714-0400 m31100| 2015-07-09T14:15:42.703-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.714-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn15] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.714-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn15] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.714-0400 m31100| 2015-07-09T14:15:42.706-0400 W SHARDING [conn15] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.715-0400 m30999| 2015-07-09T14:15:42.706-0400 W SHARDING [conn384] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.715-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn37] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.715-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn34] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.715-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn37] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.715-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn35] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.716-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn34] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.718-0400 m31100| 2015-07-09T14:15:42.706-0400 W SHARDING [conn37] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.718-0400 m31100| 2015-07-09T14:15:42.706-0400 I SHARDING [conn35] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.718-0400 m31100| 2015-07-09T14:15:42.707-0400 W SHARDING [conn34] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.719-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn36] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.719-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn39] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.719-0400 m30999| 2015-07-09T14:15:42.707-0400 W SHARDING [conn382] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.720-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn38] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.720-0400 m31100| 2015-07-09T14:15:42.707-0400 W SHARDING [conn35] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.720-0400 m30999| 2015-07-09T14:15:42.707-0400 W SHARDING [conn383] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.721-0400 m30998| 2015-07-09T14:15:42.707-0400 W SHARDING [conn379] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" 
}, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.721-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn132] could not acquire lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.721-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn36] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.721-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn39] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.721-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn38] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.722-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn32] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.722-0400 m31100| 2015-07-09T14:15:42.707-0400 I SHARDING [conn132] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.722-0400 m31100| 2015-07-09T14:15:42.708-0400 W SHARDING [conn36] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.722-0400 m31100| 2015-07-09T14:15:42.708-0400 W SHARDING [conn39] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.723-0400 m30998| 2015-07-09T14:15:42.708-0400 W SHARDING [conn378] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "2" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.723-0400 m31100| 2015-07-09T14:15:42.708-0400 W SHARDING [conn38] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.724-0400 m30998| 2015-07-09T14:15:42.708-0400 W SHARDING [conn383] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.724-0400 m31100| 2015-07-09T14:15:42.708-0400 W SHARDING [conn132] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.724-0400 m30999| 2015-07-09T14:15:42.709-0400 W SHARDING [conn379] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "/" }, { indexed_insert_1char: "4" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.725-0400 m31100| 2015-07-09T14:15:42.709-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "," }, { indexed_insert_1char: "2" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.725-0400 m30998| 2015-07-09T14:15:42.709-0400 W SHARDING [conn377] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "0" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.725-0400 m31100| 2015-07-09T14:15:42.710-0400 W SHARDING [conn32] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.726-0400 m30998| 2015-07-09T14:15:42.710-0400 W SHARDING [conn380] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "," }, { indexed_insert_1char: "2" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.726-0400 m31100| 2015-07-09T14:15:42.711-0400 I SHARDING [conn40] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba4e792e00bb67274a51 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.727-0400 m31100| 2015-07-09T14:15:42.711-0400 I SHARDING [conn40] remotely refreshing metadata for db59.coll59 based on current shard version 1|0||559eba4eca4787b9985d1e64, current metadata version is 1|0||559eba4eca4787b9985d1e64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.728-0400 m31100| 2015-07-09T14:15:42.716-0400 I SHARDING [conn40] metadata of collection db59.coll59 already up to date (shard version : 1|0||559eba4eca4787b9985d1e64, took 2ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.728-0400 m31100| 2015-07-09T14:15:42.716-0400 I SHARDING [conn40] splitChunk accepted at version 1|0||559eba4eca4787b9985d1e64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.731-0400 m30999| 2015-07-09T14:15:42.730-0400 I SHARDING [conn376] ChunkManager: time to load chunks for db59.coll59: 0ms sequenceNumber: 264 version: 1|3||559eba4eca4787b9985d1e64 based on: 1|0||559eba4eca4787b9985d1e64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.739-0400 m31100| 2015-07-09T14:15:42.735-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:42.735-0400-559eba4e792e00bb67274a5a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465742735), what: "multi-split", ns: "db59.coll59", details: { before: { min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey } }, number: 1, of: 3, chunk: { min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: "!" 
}, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eba4eca4787b9985d1e64') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.739-0400 m31100| 2015-07-09T14:15:42.736-0400 I SHARDING [conn36] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.739-0400 m31100| 2015-07-09T14:15:42.736-0400 I SHARDING [conn39] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.739-0400 m31100| 2015-07-09T14:15:42.736-0400 I SHARDING [conn132] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.739-0400 m31100| 2015-07-09T14:15:42.737-0400 I SHARDING [conn35] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.739-0400 m31100| 2015-07-09T14:15:42.737-0400 I SHARDING [conn32] request split points lookup for chunk db59.coll59 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.742-0400 m31100| 2015-07-09T14:15:42.740-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.744-0400 m31100| 2015-07-09T14:15:42.741-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.746-0400 m31100| 2015-07-09T14:15:42.741-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.747-0400 m31100| 2015-07-09T14:15:42.742-0400 W SHARDING [conn39] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.747-0400 m31100| 2015-07-09T14:15:42.743-0400 W SHARDING [conn36] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. 
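This burst of "could not acquire collection lock" warnings is expected contention, not a failure: all twenty threads push the chunk past the split threshold at once, every mongos asks the shard to split, and only the request that wins the distributed lock proceeds. The losers fail with code 125 (LockBusy) and are simply dropped, since the winner performs the split anyway. A sketch of what a manual split and that failure mode look like from the shell:

    // sh.splitAt issues the same splitChunk command seen in the log
    var res = sh.splitAt("db59.coll59", { indexed_insert_1char: "1" });
    if (!res.ok && res.code === 125) {
        // another split holds the collection lock; safe to retry or ignore
    }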
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.747-0400 m31100| 2015-07-09T14:15:42.743-0400 W SHARDING [conn132] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.748-0400 m30998| 2015-07-09T14:15:42.743-0400 W SHARDING [conn374] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.749-0400 m30998| 2015-07-09T14:15:42.744-0400 W SHARDING [conn383] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.749-0400 m31100| 2015-07-09T14:15:42.744-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.750-0400 m30998| 2015-07-09T14:15:42.744-0400 W SHARDING [conn378] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.751-0400 m31100| 2015-07-09T14:15:42.745-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" 
}, { indexed_insert_1char: "*" }, { indexed_insert_1char: "-" }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.751-0400 m31100| 2015-07-09T14:15:42.746-0400 W SHARDING [conn35] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.751-0400 m31100| 2015-07-09T14:15:42.747-0400 W SHARDING [conn32] could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db59.coll59 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.751-0400 m30998| 2015-07-09T14:15:42.747-0400 W SHARDING [conn377] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "." }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.752-0400 m30998| 2015-07-09T14:15:42.749-0400 W SHARDING [conn380] splitChunk failed - cmd: { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "*" }, { indexed_insert_1char: "-" }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db59.coll59 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.753-0400 m30998| 2015-07-09T14:15:42.752-0400 I SHARDING [conn374] ChunkManager: time to load chunks for db59.coll59: 0ms sequenceNumber: 70 version: 1|3||559eba4eca4787b9985d1e64 based on: 1|0||559eba4eca4787b9985d1e64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.799-0400 m31100| 2015-07-09T14:15:42.798-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:42.798-0400-559eba4e792e00bb67274a5b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465742798), what: "multi-split", ns: "db59.coll59", details: { before: { min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey } }, number: 2, of: 3, chunk: { min: { indexed_insert_1char: "!" 
}, max: { indexed_insert_1char: "1" }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eba4eca4787b9985d1e64') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.862-0400 m31100| 2015-07-09T14:15:42.861-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:42.861-0400-559eba4e792e00bb67274a5c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465742861), what: "multi-split", ns: "db59.coll59", details: { before: { min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey } }, number: 3, of: 3, chunk: { min: { indexed_insert_1char: "1" }, max: { indexed_insert_1char: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eba4eca4787b9985d1e64') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.925-0400 m31100| 2015-07-09T14:15:42.924-0400 I SHARDING [conn40] distributed lock 'db59.coll59/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.927-0400 m31100| 2015-07-09T14:15:42.924-0400 I COMMAND [conn40] command db59.coll59 command: splitChunk { splitChunk: "db59.coll59", keyPattern: { indexed_insert_1char: 1.0 }, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "1" } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4eca4787b9985d1e64') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 8766 } } } protocol:op_command 227ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.927-0400 m30999| 2015-07-09T14:15:42.925-0400 I SHARDING [conn378] autosplitted db59.coll59 shard: ns: db59.coll59, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { indexed_insert_1char: MinKey }, max: { indexed_insert_1char: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:42.992-0400 m31100| 2015-07-09T14:15:42.992-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63875 #191 (113 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.112-0400 m30999| 2015-07-09T14:15:43.111-0400 I NETWORK [conn375] end connection 127.0.0.1:63858 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.141-0400 m30998| 2015-07-09T14:15:43.141-0400 I NETWORK [conn375] end connection 127.0.0.1:63856 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.168-0400 m30998| 2015-07-09T14:15:43.166-0400 I NETWORK [conn377] end connection 127.0.0.1:63859 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.169-0400 m30998| 2015-07-09T14:15:43.168-0400 I NETWORK [conn376] end connection 127.0.0.1:63857 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.174-0400 m30999| 2015-07-09T14:15:43.171-0400 I NETWORK [conn382] end connection 127.0.0.1:63869 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.174-0400 m30999| 2015-07-09T14:15:43.172-0400 I NETWORK [conn377] end connection 127.0.0.1:63861 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.196-0400 m30998| 2015-07-09T14:15:43.196-0400 I NETWORK [conn378] end connection 127.0.0.1:63863 (7 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.199-0400 m30998| 2015-07-09T14:15:43.199-0400 I NETWORK [conn381] end connection 127.0.0.1:63870 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.213-0400 m30998| 2015-07-09T14:15:43.212-0400 I NETWORK [conn374] end connection 127.0.0.1:63855 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.221-0400 m30999| 2015-07-09T14:15:43.220-0400 I NETWORK [conn379] end connection 127.0.0.1:63864 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.233-0400 m30999| 2015-07-09T14:15:43.233-0400 I NETWORK [conn380] end connection 127.0.0.1:63865 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.244-0400 m30999| 2015-07-09T14:15:43.243-0400 I NETWORK [conn376] end connection 127.0.0.1:63860 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.251-0400 m30999| 2015-07-09T14:15:43.251-0400 I NETWORK [conn384] end connection 127.0.0.1:63873 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.258-0400 m30998| 2015-07-09T14:15:43.255-0400 I NETWORK [conn380] end connection 127.0.0.1:63868 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.267-0400 m30998| 2015-07-09T14:15:43.267-0400 I NETWORK [conn382] end connection 127.0.0.1:63871 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.281-0400 m30998| 2015-07-09T14:15:43.280-0400 I NETWORK [conn379] end connection 127.0.0.1:63866 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.307-0400 m30999| 2015-07-09T14:15:43.307-0400 I NETWORK [conn381] end connection 127.0.0.1:63867 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.312-0400 m30999| 2015-07-09T14:15:43.311-0400 I NETWORK [conn383] end connection 127.0.0.1:63872 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.342-0400 m30998| 2015-07-09T14:15:43.341-0400 I NETWORK [conn383] end connection 127.0.0.1:63874 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.354-0400 m30999| 2015-07-09T14:15:43.353-0400 I NETWORK [conn378] end connection 127.0.0.1:63862 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.373-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.373-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.374-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.374-0400 jstests/concurrency/fsm_workloads/indexed_insert_1char.js: Workload completed in 1034 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.374-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.374-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.374-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.374-0400 m30999| 2015-07-09T14:15:43.374-0400 I COMMAND [conn1] DROP: db59.coll59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.374-0400 m30999| 2015-07-09T14:15:43.374-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:43.374-0400-559eba4fca4787b9985d1e66", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465743374), what: "dropCollection.start", ns: "db59.coll59", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.429-0400 m30999| 2015-07-09T14:15:43.429-0400 I 
SHARDING [conn1] distributed lock 'db59.coll59/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4fca4787b9985d1e67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.430-0400 m31100| 2015-07-09T14:15:43.429-0400 I COMMAND [conn40] CMD: drop db59.coll59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.433-0400 m31200| 2015-07-09T14:15:43.433-0400 I COMMAND [conn63] CMD: drop db59.coll59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.434-0400 m31101| 2015-07-09T14:15:43.434-0400 I COMMAND [repl writer worker 13] CMD: drop db59.coll59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.434-0400 m31102| 2015-07-09T14:15:43.434-0400 I COMMAND [repl writer worker 13] CMD: drop db59.coll59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.437-0400 m31201| 2015-07-09T14:15:43.436-0400 I COMMAND [repl writer worker 6] CMD: drop db59.coll59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.437-0400 m31202| 2015-07-09T14:15:43.437-0400 I COMMAND [repl writer worker 5] CMD: drop db59.coll59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.489-0400 m31100| 2015-07-09T14:15:43.489-0400 I SHARDING [conn40] remotely refreshing metadata for db59.coll59 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eba4eca4787b9985d1e64, current metadata version is 1|3||559eba4eca4787b9985d1e64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.491-0400 m31100| 2015-07-09T14:15:43.491-0400 W SHARDING [conn40] no chunks found when reloading db59.coll59, previous version was 0|0||559eba4eca4787b9985d1e64, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.491-0400 m31100| 2015-07-09T14:15:43.491-0400 I SHARDING [conn40] dropping metadata for db59.coll59 at shard version 1|3||559eba4eca4787b9985d1e64, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.494-0400 m30999| 2015-07-09T14:15:43.493-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:43.493-0400-559eba4fca4787b9985d1e68", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465743493), what: "dropCollection", ns: "db59.coll59", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.549-0400 m30999| 2015-07-09T14:15:43.549-0400 I SHARDING [conn1] distributed lock 'db59.coll59/bs-osx108-8:30999:1436464534:16807' unlocked. 
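The run above is a complete autosplit followed by a locked drop: mongos noticed db59.coll59's single MinKey..MaxKey chunk had outgrown the reported splitThreshold of 921 bytes, one caller won the collection's distributed lock and committed a 3-way multi-split (the code-125 "splitChunk failed" warnings are the concurrent attempts that lost the lock), and the teardown then dropped the collection and its metadata under the same distributed-lock protocol. A minimal sketch of the split-point computation mongos delegates to the shard primary; the host target and the use of the splitThreshold value as maxChunkSizeBytes are assumptions for illustration:

    // Hedged sketch: ask the shard primary for candidate split keys, roughly
    // what mongos does before sending splitChunk. Host and size from the log above.
    var shard = new Mongo("bs-osx108-8:31100");             // test-rs0 primary (assumed)
    var res = shard.getDB("admin").runCommand({
        splitVector: "db59.coll59",
        keyPattern: { indexed_insert_1char: 1 },
        min: { indexed_insert_1char: MinKey },
        max: { indexed_insert_1char: MaxKey },
        maxChunkSizeBytes: 921                              // splitThreshold reported above
    });
    printjson(res.splitKeys);  // e.g. [ { indexed_insert_1char: "!" }, { indexed_insert_1char: "1" } ]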
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.605-0400 m30999| 2015-07-09T14:15:43.605-0400 I COMMAND [conn1] DROP DATABASE: db59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.606-0400 m30999| 2015-07-09T14:15:43.605-0400 I SHARDING [conn1] DBConfig::dropDatabase: db59 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.606-0400 m30999| 2015-07-09T14:15:43.605-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:43.605-0400-559eba4fca4787b9985d1e69", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465743605), what: "dropDatabase.start", ns: "db59", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.712-0400 m30999| 2015-07-09T14:15:43.712-0400 I SHARDING [conn1] DBConfig::dropDatabase: db59 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.713-0400 m31100| 2015-07-09T14:15:43.712-0400 I COMMAND [conn157] dropDatabase db59 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.713-0400 m31100| 2015-07-09T14:15:43.712-0400 I COMMAND [conn157] dropDatabase db59 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.713-0400 m30999| 2015-07-09T14:15:43.713-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:43.713-0400-559eba4fca4787b9985d1e6a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465743713), what: "dropDatabase", ns: "db59", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.714-0400 m31101| 2015-07-09T14:15:43.713-0400 I COMMAND [repl writer worker 6] dropDatabase db59 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.714-0400 m31101| 2015-07-09T14:15:43.714-0400 I COMMAND [repl writer worker 6] dropDatabase db59 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.714-0400 m31102| 2015-07-09T14:15:43.713-0400 I COMMAND [repl writer worker 4] dropDatabase db59 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.714-0400 m31102| 2015-07-09T14:15:43.713-0400 I COMMAND [repl writer worker 4] dropDatabase db59 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.798-0400 m31100| 2015-07-09T14:15:43.798-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.802-0400 m31101| 2015-07-09T14:15:43.801-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.802-0400 m31102| 2015-07-09T14:15:43.801-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.835-0400 m31200| 2015-07-09T14:15:43.835-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.837-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.838-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.838-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.838-0400 jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.838-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.838-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.838-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.839-0400 m31201| 2015-07-09T14:15:43.838-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:43.839-0400 m31202| 2015-07-09T14:15:43.838-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.846-0400 m30999| 2015-07-09T14:15:43.846-0400 I SHARDING [conn1] distributed lock 'db60/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4fca4787b9985d1e6b [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.850-0400 m30999| 2015-07-09T14:15:43.849-0400 I SHARDING [conn1] Placing [db60] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.850-0400 m30999| 2015-07-09T14:15:43.850-0400 I SHARDING [conn1] Enabling sharding for database [db60] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.904-0400 m30999| 2015-07-09T14:15:43.904-0400 I SHARDING [conn1] distributed lock 'db60/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.927-0400 m31100| 2015-07-09T14:15:43.926-0400 I INDEX [conn145] build index on: db60.coll60 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db60.coll60" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.927-0400 m31100| 2015-07-09T14:15:43.926-0400 I INDEX [conn145] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.934-0400 m31100| 2015-07-09T14:15:43.934-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.936-0400 m30999| 2015-07-09T14:15:43.936-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db60.coll60", key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.941-0400 m30999| 2015-07-09T14:15:43.940-0400 I SHARDING [conn1] distributed lock 'db60.coll60/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba4fca4787b9985d1e6c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.942-0400 m30999| 2015-07-09T14:15:43.941-0400 I SHARDING [conn1] enable sharding on: db60.coll60 with shard key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.943-0400 m30999| 2015-07-09T14:15:43.941-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:43.941-0400-559eba4fca4787b9985d1e6d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465743941), what: "shardCollection.start", ns: "db60.coll60", details: { shardKey: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, collection: "db60.coll60", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.947-0400 m31102| 2015-07-09T14:15:43.946-0400 I INDEX [repl writer worker 2] build index on: db60.coll60 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db60.coll60" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.948-0400 
m31102| 2015-07-09T14:15:43.946-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.953-0400 m31101| 2015-07-09T14:15:43.952-0400 I INDEX [repl writer worker 0] build index on: db60.coll60 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db60.coll60" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.954-0400 m31101| 2015-07-09T14:15:43.952-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.955-0400 m31102| 2015-07-09T14:15:43.955-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.961-0400 m31101| 2015-07-09T14:15:43.960-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:43.996-0400 m30999| 2015-07-09T14:15:43.996-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db60.coll60 using new epoch 559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.050-0400 m30999| 2015-07-09T14:15:44.050-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db60.coll60: 0ms sequenceNumber: 265 version: 1|0||559eba4fca4787b9985d1e6e based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.106-0400 m30999| 2015-07-09T14:15:44.106-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db60.coll60: 0ms sequenceNumber: 266 version: 1|0||559eba4fca4787b9985d1e6e based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.109-0400 m31100| 2015-07-09T14:15:44.108-0400 I SHARDING [conn188] remotely refreshing metadata for db60.coll60 with requested shard version 1|0||559eba4fca4787b9985d1e6e, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.110-0400 m31100| 2015-07-09T14:15:44.110-0400 I SHARDING [conn188] collection db60.coll60 was previously unsharded, new metadata loaded with shard version 1|0||559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.110-0400 m31100| 2015-07-09T14:15:44.110-0400 I SHARDING [conn188] collection version was loaded at version 1|0||559eba4fca4787b9985d1e6e, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.111-0400 m30999| 2015-07-09T14:15:44.110-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:44.110-0400-559eba50ca4787b9985d1e6f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465744110), what: "shardCollection", ns: "db60.coll60", details: { version: "1|0||559eba4fca4787b9985d1e6e" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.165-0400 m30999| 2015-07-09T14:15:44.164-0400 I SHARDING [conn1] distributed lock 'db60.coll60/bs-osx108-8:30999:1436464534:16807' unlocked. 
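Setup for the next workload: db60 is enabled for sharding, the shard-key index on the 100-character field name is built on the primary shard and replicated to m31101/m31102, and shardcollection creates a single chunk under epoch 559eba4fca4787b9985d1e6e. A sketch of the equivalent shell steps, run through mongos; the exact repeat count in the field name is an assumption for illustration:

    // Hedged sketch of the harness setup for db60.coll60.
    var field = "indexed_insert_long_fieldname_" + new Array(71).join("x"); // length assumed
    var key = {}; key[field] = 1;
    sh.enableSharding("db60");
    db.getSiblingDB("db60").coll60.ensureIndex(key);  // shard-key index is built first
    sh.shardCollection("db60.coll60", key);           // creates one MinKey->MaxKey chunk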
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.181-0400 m31200| 2015-07-09T14:15:44.180-0400 I INDEX [conn30] build index on: db60.coll60 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db60.coll60" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.181-0400 m31200| 2015-07-09T14:15:44.181-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.191-0400 m31200| 2015-07-09T14:15:44.191-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.192-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.246-0400 m31202| 2015-07-09T14:15:44.245-0400 I INDEX [repl writer worker 12] build index on: db60.coll60 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db60.coll60" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.246-0400 m31202| 2015-07-09T14:15:44.245-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.268-0400 m31201| 2015-07-09T14:15:44.244-0400 I INDEX [repl writer worker 9] build index on: db60.coll60 properties: { v: 1, key: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, name: "indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx_1", ns: "db60.coll60" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.268-0400 m31201| 2015-07-09T14:15:44.244-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.300-0400 m31202| 2015-07-09T14:15:44.300-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.305-0400 m31201| 2015-07-09T14:15:44.304-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.377-0400 m30998| 2015-07-09T14:15:44.377-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63876 #384 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.379-0400 m30998| 2015-07-09T14:15:44.379-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63877 #385 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.389-0400 m30998| 2015-07-09T14:15:44.389-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63878 #386 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.419-0400 m30998| 2015-07-09T14:15:44.419-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63879 #387 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.424-0400 m30998| 2015-07-09T14:15:44.420-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63880 #388 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.429-0400 m30999| 2015-07-09T14:15:44.428-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63881 #385 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.430-0400 m30998| 2015-07-09T14:15:44.428-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63882 #389 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.432-0400 m30999| 2015-07-09T14:15:44.429-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63883 #386 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.432-0400 m30998| 2015-07-09T14:15:44.430-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63885 #390 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.436-0400 m30999| 2015-07-09T14:15:44.436-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63884 #387 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.437-0400 m30998| 2015-07-09T14:15:44.437-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63890 #391 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.438-0400 m30998| 2015-07-09T14:15:44.438-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63894 #392 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.441-0400 m30998| 2015-07-09T14:15:44.439-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63895 #393 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.444-0400 m30999| 2015-07-09T14:15:44.444-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63886 #388 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.444-0400 m30999| 2015-07-09T14:15:44.444-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63887 #389 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.446-0400 m30999| 2015-07-09T14:15:44.446-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63888 #390 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.446-0400 m30999| 2015-07-09T14:15:44.446-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63889 #391 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.451-0400 m30999| 2015-07-09T14:15:44.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63891 #392 (9 
connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.451-0400 m30999| 2015-07-09T14:15:44.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63892 #393 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.452-0400 m30999| 2015-07-09T14:15:44.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63893 #394 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.462-0400 setting random seed: 9661018364131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.462-0400 setting random seed: 6856226013042 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.463-0400 setting random seed: 5928977304138 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.463-0400 setting random seed: 9272393174469 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.464-0400 setting random seed: 1136178243905 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.465-0400 setting random seed: 2590654855594 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.467-0400 setting random seed: 1112155877053 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.467-0400 setting random seed: 8321597049944 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.469-0400 setting random seed: 2735721566714 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.473-0400 setting random seed: 2003834345377 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.473-0400 setting random seed: 4098272207193 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.473-0400 m30998| 2015-07-09T14:15:44.470-0400 I SHARDING [conn386] ChunkManager: time to load chunks for db60.coll60: 0ms sequenceNumber: 71 version: 1|0||559eba4fca4787b9985d1e6e based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.474-0400 setting random seed: 1081683537922 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.481-0400 setting random seed: 2504984377883 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.481-0400 setting random seed: 8443858446553 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.483-0400 setting random seed: 8491348735988 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.483-0400 setting random seed: 9855230329558 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.483-0400 setting random seed: 6213216460309 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.483-0400 setting random seed: 5747754205949 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.484-0400 setting random seed: 1272458690218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.484-0400 setting random seed: 5529097053222 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.486-0400 m31100| 2015-07-09T14:15:44.486-0400 I SHARDING [conn40] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.486-0400 m31100| 2015-07-09T14:15:44.486-0400 I SHARDING [conn32] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.504-0400 m31100| 2015-07-09T14:15:44.503-0400 I SHARDING [conn40] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.506-0400 m31100| 2015-07-09T14:15:44.504-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.506-0400 m31100| 2015-07-09T14:15:44.505-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.507-0400 m31100| 2015-07-09T14:15:44.506-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.509-0400 m31100| 2015-07-09T14:15:44.508-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.509-0400 m31100| 2015-07-09T14:15:44.509-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.510-0400 m31100| 2015-07-09T14:15:44.509-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.511-0400 m31100| 2015-07-09T14:15:44.510-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.512-0400 m31100| 2015-07-09T14:15:44.511-0400 I SHARDING [conn35] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.513-0400 m31100| 2015-07-09T14:15:44.512-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.513-0400 m31100| 2015-07-09T14:15:44.512-0400 I SHARDING [conn32] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.515-0400 m31100| 2015-07-09T14:15:44.514-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.515-0400 m31100| 2015-07-09T14:15:44.514-0400 I SHARDING [conn132] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.517-0400 m31100| 2015-07-09T14:15:44.515-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:44.520-0400 m31100| 2015-07-09T14:15:44.516-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.521-0400 m31100| 2015-07-09T14:15:44.516-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.521-0400 m31100| 2015-07-09T14:15:44.516-0400 I SHARDING [conn37] could not acquire lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.521-0400 m31100| 2015-07-09T14:15:44.516-0400 I SHARDING [conn34] could not acquire lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.521-0400 m31100| 2015-07-09T14:15:44.516-0400 I SHARDING [conn38] could not acquire lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.521-0400 m31100| 2015-07-09T14:15:44.516-0400 I SHARDING [conn37] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.522-0400 m31100| 2015-07-09T14:15:44.517-0400 I SHARDING [conn34] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.522-0400 m31100| 2015-07-09T14:15:44.517-0400 I SHARDING [conn38] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' was not acquired. 
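From here the db59 pattern repeats at higher fan-out: twenty workload threads insert through two routers (m30998, m30999), many router-side connections independently request split points and send splitChunk, and the shard serializes them on the distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335'. The losers log "(another update won)" and return code 125 to their router; exactly one winner performs the split. The resulting metadata can be read back from the config servers; a sketch assuming the 3.x config schema:

    // Hedged sketch: inspect the outcome of the contended splits (via mongos).
    var cfg = db.getSiblingDB("config");
    cfg.chunks.find({ ns: "db60.coll60" }).sort({ min: 1 }).forEach(printjson);
    printjson(cfg.locks.findOne({ _id: "db60.coll60" }));  // the contended lock document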
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.522-0400 m31100| 2015-07-09T14:15:44.517-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.522-0400 m31100| 2015-07-09T14:15:44.517-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.523-0400 m30999| 2015-07-09T14:15:44.518-0400 W SHARDING [conn392] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.523-0400 m31100| 2015-07-09T14:15:44.517-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.525-0400 m30999| 2015-07-09T14:15:44.518-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.526-0400 m30999| 2015-07-09T14:15:44.518-0400 W SHARDING [conn386] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.527-0400 m30999| 2015-07-09T14:15:44.518-0400 W SHARDING [conn394] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : 
MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.527-0400 m31100| 2015-07-09T14:15:44.517-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.527-0400 m31100| 2015-07-09T14:15:44.517-0400 I SHARDING [conn35] could not acquire lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.528-0400 m31100| 2015-07-09T14:15:44.518-0400 W SHARDING [conn132] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.528-0400 m31100| 2015-07-09T14:15:44.518-0400 I SHARDING [conn36] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.528-0400 m31100| 2015-07-09T14:15:44.518-0400 I SHARDING [conn35] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.529-0400 m31100| 2015-07-09T14:15:44.519-0400 W SHARDING [conn35] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.550-0400 m30998| 2015-07-09T14:15:44.519-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.551-0400 m31100| 2015-07-09T14:15:44.519-0400 I SHARDING [conn32] could not acquire lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.551-0400 m31100| 2015-07-09T14:15:44.519-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 
MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.552-0400 m30998| 2015-07-09T14:15:44.519-0400 W SHARDING [conn386] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.552-0400 m31100| 2015-07-09T14:15:44.519-0400 I SHARDING [conn32] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.552-0400 m31100| 2015-07-09T14:15:44.520-0400 W SHARDING [conn32] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.552-0400 m31100| 2015-07-09T14:15:44.520-0400 W SHARDING [conn36] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
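Note that the racing requests carry different splitKeys vectors ([1, 7, 10] vs [1, 7, 10, 14] vs [1, 5, 8, 10, 14, 19], and so on): each split-points lookup scans the chunk at a different moment while the threads keep inserting, so later scans see more documents and propose more cut points. Whichever request wins the lock determines the actual boundaries. For comparison, deterministic manual splits would go through mongos; a sketch reusing the 'field' variable assumed above:

    // Hedged sketch: explicit splits at chosen keys instead of racing autosplits.
    [1, 5, 8, 10, 14, 19].forEach(function (v) {
        var at = {}; at[field] = v;
        sh.splitAt("db60.coll60", at);  // splits the containing chunk at this key
    });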
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.553-0400 m30998| 2015-07-09T14:15:44.522-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.553-0400 m31100| 2015-07-09T14:15:44.528-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.553-0400 m31100| 2015-07-09T14:15:44.529-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.554-0400 m30998| 2015-07-09T14:15:44.523-0400 W SHARDING [conn390] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.555-0400 m31100| 2015-07-09T14:15:44.529-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.555-0400 m31100| 2015-07-09T14:15:44.530-0400 I SHARDING [conn32] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.555-0400 m31100| 2015-07-09T14:15:44.530-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.555-0400 m31100| 2015-07-09T14:15:44.530-0400 I SHARDING [conn36] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.556-0400 m31100| 2015-07-09T14:15:44.530-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.557-0400 m31100| 2015-07-09T14:15:44.531-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.557-0400 m31100| 2015-07-09T14:15:44.531-0400 I SHARDING [conn40] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba50792e00bb67274a5e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.558-0400 m31100| 2015-07-09T14:15:44.532-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.558-0400 m31100| 2015-07-09T14:15:44.532-0400 I SHARDING [conn40] remotely refreshing metadata for db60.coll60 based on current shard version 1|0||559eba4fca4787b9985d1e6e, current metadata version is 1|0||559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.558-0400 m31100| 2015-07-09T14:15:44.532-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.559-0400 m31100| 2015-07-09T14:15:44.532-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.559-0400 m31100| 2015-07-09T14:15:44.532-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.566-0400 m30999| 2015-07-09T14:15:44.532-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.567-0400 m31100| 2015-07-09T14:15:44.534-0400 W SHARDING [conn36] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.568-0400 m30998| 2015-07-09T14:15:44.534-0400 W SHARDING [conn389] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.570-0400 m31100| 2015-07-09T14:15:44.534-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.570-0400 m31100| 2015-07-09T14:15:44.534-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.570-0400 m31100| 2015-07-09T14:15:44.534-0400 W SHARDING [conn32] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.572-0400 m30999| 2015-07-09T14:15:44.535-0400 W SHARDING [conn389] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.572-0400 m31100| 2015-07-09T14:15:44.535-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.574-0400 m30998| 2015-07-09T14:15:44.535-0400 W SHARDING [conn385] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.575-0400 m31100| 2015-07-09T14:15:44.537-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.581-0400 m30999| 2015-07-09T14:15:44.535-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.583-0400 m30999| 2015-07-09T14:15:44.537-0400 W SHARDING [conn394] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.583-0400 m31100| 2015-07-09T14:15:44.543-0400 I SHARDING [conn40] metadata of collection db60.coll60 already up to date (shard version : 1|0||559eba4fca4787b9985d1e6e, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.584-0400 m31100| 2015-07-09T14:15:44.544-0400 I SHARDING [conn40] splitChunk accepted at version 1|0||559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.584-0400 m31100| 
2015-07-09T14:15:44.560-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.584-0400 m31100| 2015-07-09T14:15:44.560-0400 I SHARDING [conn36] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.584-0400 m31100| 2015-07-09T14:15:44.560-0400 I SHARDING [conn32] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.584-0400 m31100| 2015-07-09T14:15:44.560-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.584-0400 m31100| 2015-07-09T14:15:44.561-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.585-0400 m31100| 2015-07-09T14:15:44.561-0400 I SHARDING [conn132] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.585-0400 m31100| 2015-07-09T14:15:44.561-0400 I SHARDING [conn35] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.585-0400 m31100| 2015-07-09T14:15:44.562-0400 I SHARDING [conn39] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.588-0400 m31100| 2015-07-09T14:15:44.562-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.590-0400 m31100| 2015-07-09T14:15:44.562-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.591-0400 m31100| 2015-07-09T14:15:44.562-0400 I 
SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.592-0400 m31100| 2015-07-09T14:15:44.562-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.593-0400 m31100| 2015-07-09T14:15:44.563-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.594-0400 m31100| 2015-07-09T14:15:44.563-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.604-0400 m31100| 2015-07-09T14:15:44.564-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.605-0400 m31100| 2015-07-09T14:15:44.564-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.606-0400 m31100| 2015-07-09T14:15:44.564-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey 
}, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.607-0400 m31100| 2015-07-09T14:15:44.564-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.607-0400 m31100| 2015-07-09T14:15:44.574-0400 W SHARDING [conn132] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.607-0400 m31100| 2015-07-09T14:15:44.575-0400 W SHARDING [conn32] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.610-0400 m30998| 2015-07-09T14:15:44.575-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.619-0400 m30998| 2015-07-09T14:15:44.575-0400 W SHARDING [conn391] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.688-0400 m31100| 2015-07-09T14:15:44.575-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.714-0400 m31100| 2015-07-09T14:15:44.576-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.764-0400 m30999| 2015-07-09T14:15:44.576-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.812-0400 m30998| 2015-07-09T14:15:44.576-0400 W SHARDING [conn387] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.814-0400 m30999| 2015-07-09T14:15:44.578-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.815-0400 m30998| 2015-07-09T14:15:44.578-0400 W SHARDING [conn390] splitChunk failed - 
cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.816-0400 m30999| 2015-07-09T14:15:44.578-0400 W SHARDING [conn394] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection 
lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.817-0400 m31100| 2015-07-09T14:15:44.576-0400 W SHARDING [conn36] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.818-0400 m30998| 2015-07-09T14:15:44.579-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.818-0400 m31100| 2015-07-09T14:15:44.578-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.818-0400 m31100| 2015-07-09T14:15:44.578-0400 W SHARDING [conn39] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.828-0400 m31100| 2015-07-09T14:15:44.578-0400 W SHARDING [conn35] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.830-0400 m31100| 2015-07-09T14:15:44.579-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.831-0400 m30999| 2015-07-09T14:15:44.579-0400 W SHARDING [conn392] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.831-0400 m30998| 2015-07-09T14:15:44.582-0400 I SHARDING [conn393] ChunkManager: time to load chunks for db60.coll60: 0ms sequenceNumber: 72 version: 1|3||559eba4fca4787b9985d1e6e based on: 1|0||559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.832-0400 m31100| 2015-07-09T14:15:44.585-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:44.585-0400-559eba50792e00bb67274a64", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465744585), what: "multi-split", ns: "db60.coll60", details: { before: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } }, number: 1, of: 3, chunk: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eba4fca4787b9985d1e6e') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.832-0400 m31100| 2015-07-09T14:15:44.597-0400 I SHARDING [conn38] request split 
points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.832-0400 m31100| 2015-07-09T14:15:44.598-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.832-0400 m31100| 2015-07-09T14:15:44.598-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.832-0400 m31100| 2015-07-09T14:15:44.598-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.833-0400 m31100| 2015-07-09T14:15:44.598-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.834-0400 m31100| 2015-07-09T14:15:44.598-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.834-0400 m31100| 2015-07-09T14:15:44.600-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.835-0400 m31100| 2015-07-09T14:15:44.608-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.838-0400 m31100| 2015-07-09T14:15:44.610-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.842-0400 m31100| 2015-07-09T14:15:44.611-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 
4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.842-0400 m31100| 2015-07-09T14:15:44.612-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.844-0400 m30999| 2015-07-09T14:15:44.613-0400 W SHARDING [conn386] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { 
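The burst of code-125 failures above is lock contention, not data loss: several mongos routers (m30999, m30998) notice the same oversized chunk [{ : MinKey }, { : MaxKey }) of db60.coll60 at once and race to split it, and the shard serializes them with a distributed collection lock, so every attempt except the winner's is rejected with "could not acquire collection lock". A minimal mongo-shell sketch of the operation each router is issuing, with "k" standing in for the test's long indexed_insert_long_fieldname_... shard key field (illustrative only, not part of the test):

    // Ask mongos to split db60.coll60 at one shard-key value; tolerate
    // losing the distributed-lock race exactly as the routers above do.
    var res = db.getSiblingDB("admin").runCommand({
        split: "db60.coll60",
        middle: { k: 8 }
    });
    if (!res.ok && res.code === 125) {
        // Same failure as logged: another connection holds the collection
        // lock for this chunk, so this split simply lost the race.
        print("split skipped: " + res.errmsg);
    }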
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.844-0400 m31100| 2015-07-09T14:15:44.614-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.845-0400 m31100| 2015-07-09T14:15:44.614-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.845-0400 m31100| 2015-07-09T14:15:44.614-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.847-0400 m30999| 2015-07-09T14:15:44.615-0400 W SHARDING [conn389] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.848-0400 m30999| 2015-07-09T14:15:44.615-0400 W SHARDING [conn387] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.859-0400 m31100| 2015-07-09T14:15:44.617-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.870-0400 m30999| 2015-07-09T14:15:44.618-0400 W SHARDING [conn385] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.870-0400 m31100| 2015-07-09T14:15:44.621-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.871-0400 m31100| 2015-07-09T14:15:44.621-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.871-0400 m31100| 2015-07-09T14:15:44.621-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.872-0400 m31100| 2015-07-09T14:15:44.623-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.872-0400 m31100| 2015-07-09T14:15:44.626-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.881-0400 m30999| 2015-07-09T14:15:44.627-0400 W SHARDING [conn392] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.881-0400 m31100| 2015-07-09T14:15:44.633-0400 I SHARDING [conn34] request split points lookup for chunk 
db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.881-0400 m31100| 2015-07-09T14:15:44.633-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.881-0400 m31100| 2015-07-09T14:15:44.633-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.882-0400 m31100| 2015-07-09T14:15:44.633-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.883-0400 m31100| 2015-07-09T14:15:44.633-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.883-0400 m31100| 2015-07-09T14:15:44.633-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.895-0400 m31100| 2015-07-09T14:15:44.634-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.895-0400 m31100| 2015-07-09T14:15:44.635-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.895-0400 m31100| 2015-07-09T14:15:44.635-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.895-0400 m31100| 2015-07-09T14:15:44.635-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.895-0400 m31100| 2015-07-09T14:15:44.635-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.896-0400 m31100| 2015-07-09T14:15:44.635-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.896-0400 m31100| 2015-07-09T14:15:44.635-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.896-0400 m31100| 2015-07-09T14:15:44.635-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.896-0400 m31100| 2015-07-09T14:15:44.635-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.896-0400 m31100| 2015-07-09T14:15:44.635-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.897-0400 m31100| 2015-07-09T14:15:44.636-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.897-0400 m31100| 2015-07-09T14:15:44.636-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.897-0400 m31100| 2015-07-09T14:15:44.636-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.897-0400 m31100| 2015-07-09T14:15:44.636-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.899-0400 m31100| 2015-07-09T14:15:44.636-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.899-0400 m31100| 2015-07-09T14:15:44.636-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 
5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.899-0400 m31100| 2015-07-09T14:15:44.636-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.899-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.899-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.900-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.900-0400 m31100| 2015-07-09T14:15:44.637-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.900-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.900-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.900-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.901-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.903-0400 m31100| 2015-07-09T14:15:44.637-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.904-0400 m31100| 2015-07-09T14:15:44.638-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.905-0400 m31100| 2015-07-09T14:15:44.638-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.905-0400 
m31100| 2015-07-09T14:15:44.638-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.906-0400 m30999| 2015-07-09T14:15:44.637-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.907-0400 m31100| 2015-07-09T14:15:44.638-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
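The "possible low cardinality key detected" warnings interleaved above come from the split-point lookup itself: the FSM workload inserts every document with one of roughly twenty shard-key values (0.0 through 19.0, the same values that appear in splitKeys), so the scan keeps finding whole ranges occupied by a single value and warns that a chunk bounded by such a value cannot be split any further. A hypothetical diagnostic (not run by the test) that makes the skew visible, again with "k" standing in for the long shard key field:

    // Group db60.coll60 by shard key: ~20 distinct values, each holding
    // many documents, which is exactly the low-cardinality shape mongod
    // is warning about.
    db.getSiblingDB("db60").coll60.aggregate([
        { $group: { _id: "$k", n: { $sum: 1 } } },
        { $sort: { _id: 1 } }
    ]).forEach(function (d) {
        print("key " + d._id + ": " + d.n + " docs");
    });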
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.908-0400 m30999| 2015-07-09T14:15:44.638-0400 W SHARDING [conn386] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.910-0400 m31100| 2015-07-09T14:15:44.639-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.912-0400 m31100| 2015-07-09T14:15:44.639-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 
12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.915-0400 m31100| 2015-07-09T14:15:44.642-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.915-0400 m31100| 2015-07-09T14:15:44.642-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.917-0400 m30999| 2015-07-09T14:15:44.642-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:44.917-0400 m31100| 2015-07-09T14:15:44.647-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.018-0400 m30999| 2015-07-09T14:15:44.642-0400 W SHARDING [conn389] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : 
MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.018-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.020-0400 m30999| 2015-07-09T14:15:44.653-0400 W SHARDING [conn392] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.026-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.028-0400 m30999| 2015-07-09T14:15:44.657-0400 W SHARDING [conn390] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.028-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.028-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality key detected in 
db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.028-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.030-0400 m30999| 2015-07-09T14:15:44.659-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.030-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality 
key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.032-0400 m30999| 2015-07-09T14:15:44.666-0400 W SHARDING [conn389] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.033-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.035-0400 m30999| 2015-07-09T14:15:44.668-0400 W SHARDING [conn387] 
splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.035-0400 m31100| 2015-07-09T14:15:44.647-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.037-0400 m30999| 2015-07-09T14:15:44.670-0400 W SHARDING [conn386] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.040-0400 m31100| 2015-07-09T14:15:44.648-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:44.648-0400-559eba50792e00bb67274a65", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465744648), what: "multi-split", ns: "db60.coll60", details: { before: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } }, number: 2, of: 3, chunk: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, lastmod: 
Timestamp 1000|2, lastmodEpoch: ObjectId('559eba4fca4787b9985d1e6e') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.042-0400 m30999| 2015-07-09T14:15:44.678-0400 W SHARDING [conn385] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.042-0400 m31100| 2015-07-09T14:15:44.649-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.044-0400 m30999| 2015-07-09T14:15:44.678-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 
1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.053-0400 m31100| 2015-07-09T14:15:44.649-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.055-0400 m30999| 2015-07-09T14:15:44.683-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 
}, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.056-0400 m31100| 2015-07-09T14:15:44.649-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.056-0400 m31100| 2015-07-09T14:15:44.650-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.056-0400 m31100| 2015-07-09T14:15:44.651-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.058-0400 m30999| 2015-07-09T14:15:44.767-0400 W SHARDING [conn389] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 
}, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.058-0400 m31100| 2015-07-09T14:15:44.651-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.060-0400 m30999| 2015-07-09T14:15:44.769-0400 W SHARDING [conn386] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.060-0400 m31100| 2015-07-09T14:15:44.651-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.062-0400 m30999| 2015-07-09T14:15:44.773-0400 W SHARDING [conn392] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.062-0400 m31100| 2015-07-09T14:15:44.652-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.062-0400 m30998| 2015-07-09T14:15:44.893-0400 I NETWORK [conn386] end connection 127.0.0.1:63878 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.069-0400 m30999| 2015-07-09T14:15:44.773-0400 W SHARDING [conn387] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.070-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.073-0400 m30999| 2015-07-09T14:15:44.784-0400 W SHARDING [conn390] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.073-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.077-0400 m30999| 2015-07-09T14:15:44.790-0400 W SHARDING [conn394] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.078-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.081-0400 m30999| 2015-07-09T14:15:44.793-0400 W SHARDING [conn386] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.082-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.093-0400 m30999| 2015-07-09T14:15:44.797-0400 W SHARDING [conn388] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: 
ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.093-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.096-0400 m30999| 2015-07-09T14:15:44.802-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:45.096-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.097-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.097-0400 m31100| 2015-07-09T14:15:44.653-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.097-0400 m31100| 2015-07-09T14:15:44.654-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.098-0400 m31100| 2015-07-09T14:15:44.654-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.098-0400 m31100| 2015-07-09T14:15:44.654-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.098-0400 m31100| 2015-07-09T14:15:44.654-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.099-0400 m31100| 2015-07-09T14:15:44.654-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.103-0400 m31100| 2015-07-09T14:15:44.654-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.109-0400 m31100| 2015-07-09T14:15:44.654-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.109-0400 m30998| 2015-07-09T14:15:44.935-0400 I NETWORK [conn389] end connection 127.0.0.1:63882 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.109-0400 m30999| 2015-07-09T14:15:44.949-0400 I NETWORK [conn385] end connection 127.0.0.1:63881 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.120-0400 m31100| 2015-07-09T14:15:44.655-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.120-0400 m30998| 2015-07-09T14:15:44.940-0400 I NETWORK [conn384] end connection 127.0.0.1:63876 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.120-0400 m30999| 2015-07-09T14:15:44.970-0400 I NETWORK [conn388] end connection 127.0.0.1:63886 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.121-0400 m30998| 2015-07-09T14:15:44.941-0400 I NETWORK [conn387] end connection 127.0.0.1:63879 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.123-0400 m31100| 2015-07-09T14:15:44.656-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.125-0400 m30998| 2015-07-09T14:15:44.959-0400 I NETWORK [conn385] end connection 127.0.0.1:63877 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.126-0400 m31100| 2015-07-09T14:15:44.658-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
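The splitChunk requests above are the shard-side half of chunk splitting: each mongos that notices the chunk has grown asks the shard primary (m31100) to split it at the listed splitKeys, and the primary must first take a distributed collection lock for db60.coll60. From the shell, the public equivalent is the admin split command routed through a mongos. A minimal sketch follows; connecting this way is an assumption for illustration, not something the test does at this point (the port matches the mongos in this log).

// Sketch only: requesting the same kind of split from the mongo shell.
var mongos = new Mongo("127.0.0.1:30999");   // assumption: one of this test's mongos
var admin = mongos.getDB("admin");
// Ask mongos to split db60.coll60's [MinKey, MaxKey) chunk at one key value;
// mongos forwards this to the shard primary as the splitChunk command logged above.
printjson(admin.runCommand({
    split: "db60.coll60",
    middle: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10 }
}));
// Under contention this returns ok: 0 with the same "could not acquire
// collection lock" error (code 125) that recurs throughout this log.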
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.126-0400 m30998| 2015-07-09T14:15:44.959-0400 I NETWORK [conn392] end connection 127.0.0.1:63894 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.126-0400 m31100| 2015-07-09T14:15:44.661-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.126-0400 m30998| 2015-07-09T14:15:45.012-0400 I NETWORK [conn390] end connection 127.0.0.1:63885 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.128-0400 m30998| 2015-07-09T14:15:45.014-0400 I NETWORK [conn388] end connection 127.0.0.1:63880 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.128-0400 m30998| 2015-07-09T14:15:45.029-0400 I NETWORK [conn393] end connection 127.0.0.1:63895 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.129-0400 m31100| 2015-07-09T14:15:44.661-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.132-0400 m31100| 2015-07-09T14:15:44.661-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.136-0400 m31100| 2015-07-09T14:15:44.661-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.136-0400 m31100| 2015-07-09T14:15:44.661-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.136-0400 m31100| 2015-07-09T14:15:44.661-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.137-0400 m31100| 2015-07-09T14:15:44.662-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.137-0400 m31100| 2015-07-09T14:15:44.662-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.139-0400 m31100| 2015-07-09T14:15:44.662-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.140-0400 m31100| 2015-07-09T14:15:44.662-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.142-0400 m31100| 2015-07-09T14:15:44.662-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.143-0400 m31100| 2015-07-09T14:15:44.662-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.144-0400 m31100| 2015-07-09T14:15:44.662-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.145-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.147-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.150-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.151-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.151-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.152-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.153-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.157-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.158-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.161-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.169-0400 m31100| 2015-07-09T14:15:44.663-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.190-0400 m31100| 2015-07-09T14:15:44.664-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:45.191-0400 m31100| 2015-07-09T14:15:44.664-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.191-0400 m31100| 2015-07-09T14:15:44.666-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.192-0400 m31100| 2015-07-09T14:15:44.667-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.192-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.193-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.194-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.194-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.194-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.195-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.195-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.196-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.196-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.196-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.196-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.197-0400 m31100| 
2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.197-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.197-0400 m31100| 2015-07-09T14:15:44.667-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.199-0400 m31100| 2015-07-09T14:15:44.668-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: 
"test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.199-0400 m30999| 2015-07-09T14:15:45.038-0400 I NETWORK [conn390] end connection 127.0.0.1:63888 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.199-0400 m31100| 2015-07-09T14:15:44.670-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.200-0400 m30999| 2015-07-09T14:15:45.046-0400 I NETWORK [conn386] end connection 127.0.0.1:63883 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.200-0400 m31100| 2015-07-09T14:15:44.673-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.200-0400 m31100| 2015-07-09T14:15:44.673-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.200-0400 m31100| 2015-07-09T14:15:44.673-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.200-0400 m31100| 2015-07-09T14:15:44.673-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.201-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.201-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.201-0400 m31100| 2015-07-09T14:15:44.674-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.201-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.202-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.202-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.202-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.203-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.203-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.203-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.204-0400 m31100| 2015-07-09T14:15:44.674-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.204-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.204-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.205-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.205-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.205-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.206-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.206-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 
} [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.206-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.207-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.207-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.207-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.207-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.207-0400 m31100| 2015-07-09T14:15:44.675-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.213-0400 m31100| 2015-07-09T14:15:44.675-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.213-0400 m30998| 2015-07-09T14:15:45.051-0400 I NETWORK [conn391] end connection 127.0.0.1:63890 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.213-0400 m31100| 2015-07-09T14:15:44.676-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.216-0400 m31100| 2015-07-09T14:15:44.676-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.217-0400 m31100| 2015-07-09T14:15:44.676-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.217-0400 m31100| 2015-07-09T14:15:44.676-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.218-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.218-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.218-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.219-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.219-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.219-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.220-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.220-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] 
possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.229-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.230-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.230-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.230-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.231-0400 m31100| 2015-07-09T14:15:44.677-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.231-0400 m31100| 2015-07-09T14:15:44.678-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.233-0400 m31100| 2015-07-09T14:15:44.680-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.233-0400 m31100| 2015-07-09T14:15:44.683-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
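Only one of the competing split attempts can hold the distributed collection lock for db60.coll60 at a time; the losers fail with the "lock ... is taken" warning and code 125, which is benign here because the next split-point lookup simply tries again. A retry helper in that spirit might look like the following sketch (trySplit is a hypothetical name, not part of this test):

// Retry sketch (hypothetical helper): a losing split attempt detects the
// lock-taken failure and defers to whichever process won the lock.
function trySplit(admin, ns, middleDoc) {
    var res = admin.runCommand({ split: ns, middle: middleDoc });
    if (res.ok === 0 && /could not acquire collection lock/.test(res.errmsg)) {
        return false;  // another mongos holds the distributed lock; retry later
    }
    assert.commandWorked(res);
    return true;
}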
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.233-0400 m31100| 2015-07-09T14:15:44.689-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.233-0400 m31100| 2015-07-09T14:15:44.689-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.233-0400 m31100| 2015-07-09T14:15:44.689-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.234-0400 m31100| 2015-07-09T14:15:44.689-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.235-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.235-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.235-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.236-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.236-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.237-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.237-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.237-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.237-0400 m31100| 
2015-07-09T14:15:44.690-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.238-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.238-0400 m31100| 2015-07-09T14:15:44.690-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.238-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.238-0400 m31100| 2015-07-09T14:15:44.690-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.239-0400 m31100| 2015-07-09T14:15:44.691-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.239-0400 m31100| 2015-07-09T14:15:44.691-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.239-0400 m31100| 2015-07-09T14:15:44.691-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.240-0400 m31100| 2015-07-09T14:15:44.691-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.240-0400 m31100| 2015-07-09T14:15:44.691-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.240-0400 m31100| 2015-07-09T14:15:44.691-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.241-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.241-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.241-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.242-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.242-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.242-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.243-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.243-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.244-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.244-0400 m31100| 2015-07-09T14:15:44.692-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.245-0400 m31100| 2015-07-09T14:15:44.693-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.245-0400 m31100| 2015-07-09T14:15:44.693-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.245-0400 m31100| 2015-07-09T14:15:44.693-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.246-0400 m31100| 2015-07-09T14:15:44.693-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.246-0400 m31100| 2015-07-09T14:15:44.694-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.247-0400 m31100| 2015-07-09T14:15:44.694-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.247-0400 m31100| 2015-07-09T14:15:44.694-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.251-0400 m31100| 2015-07-09T14:15:44.694-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.251-0400 m30999| 2015-07-09T14:15:45.076-0400 I NETWORK [conn387] end connection 127.0.0.1:63884 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.251-0400 m31100| 2015-07-09T14:15:44.694-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.251-0400 m31100| 2015-07-09T14:15:44.694-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.251-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.252-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.252-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.253-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.253-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.253-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.254-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.254-0400 m31100| 2015-07-09T14:15:44.695-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.254-0400 m31100| 
2015-07-09T14:15:44.695-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.255-0400 m31100| 2015-07-09T14:15:44.696-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.255-0400 m31100| 2015-07-09T14:15:44.696-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.255-0400 m31100| 2015-07-09T14:15:44.696-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.256-0400 m31100| 2015-07-09T14:15:44.696-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.256-0400 m31100| 2015-07-09T14:15:44.696-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.256-0400 m31100| 2015-07-09T14:15:44.696-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.259-0400 m31100| 2015-07-09T14:15:44.698-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.261-0400 m30999| 2015-07-09T14:15:45.083-0400 W SHARDING [conn392] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.262-0400 m31100| 2015-07-09T14:15:44.698-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.268-0400 m30999| 2015-07-09T14:15:45.084-0400 W SHARDING [conn394] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.270-0400 m31100| 2015-07-09T14:15:44.708-0400 I SHARDING [conn40] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:44.708-0400-559eba50792e00bb67274a66", server: "bs-osx108-8", clientAddr: "127.0.0.1:62642", time: new Date(1436465744708), what: "multi-split", ns: "db60.coll60", details: { before: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } }, number: 3, of: 3, chunk: { min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eba4fca4787b9985d1e6e') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.270-0400 m30999| 2015-07-09T14:15:45.086-0400 I SHARDING [conn391] ChunkManager: time to load chunks for db60.coll60: 0ms sequenceNumber: 267 version: 1|3||559eba4fca4787b9985d1e6e based on: 1|0||559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.271-0400 m31100| 2015-07-09T14:15:44.766-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.272-0400 m30999| 2015-07-09T14:15:45.086-0400 I SHARDING [conn391] autosplitted db60.coll60 shard: ns: db60.coll60, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.272-0400 m31100| 2015-07-09T14:15:44.766-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.274-0400 m30999| 2015-07-09T14:15:45.093-0400 W SHARDING [conn389] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.275-0400 m31100| 2015-07-09T14:15:44.768-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.276-0400 m30999| 2015-07-09T14:15:45.099-0400 W SHARDING [conn393] splitChunk failed - cmd: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.277-0400 m31100| 2015-07-09T14:15:44.768-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.277-0400 m31100| 2015-07-09T14:15:44.769-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.278-0400 m31100| 2015-07-09T14:15:44.769-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.279-0400 m31100| 2015-07-09T14:15:44.769-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.280-0400 m31100| 2015-07-09T14:15:44.769-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: 
Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.284-0400 m31100| 2015-07-09T14:15:44.770-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.284-0400 m30999| 2015-07-09T14:15:45.165-0400 I NETWORK [conn394] end connection 127.0.0.1:63893 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.288-0400 m31100| 2015-07-09T14:15:44.770-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.288-0400 m30999| 2015-07-09T14:15:45.182-0400 I NETWORK [conn392] end connection 127.0.0.1:63891 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.289-0400 m31100| 2015-07-09T14:15:44.772-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.289-0400 m31100| 2015-07-09T14:15:44.773-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
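The repeated "could not acquire collection lock ... is taken" warnings on m31100 and the mongos-side "splitChunk failed ... code: 125" results above are lock contention rather than data damage: several connections notice the same over-threshold chunk at once, each asks the shard primary to split it, and the primary serializes the requests behind a per-collection distributed lock, so exactly one split proceeds (the "multi-split" / "autosplitted" entries) while the rest fail and are simply retried on a later write. A minimal mongo shell sketch of the same retry-on-contention pattern, assuming a connection to the mongos of a sharded cluster; the splitWithRetry helper, the test.lowcard namespace, and the backoff policy are illustrative assumptions, not part of this test:

// Hypothetical helper: retry a manual split while a concurrent split
// holds the collection lock (the situation logged above).
function splitWithRetry(ns, middle, maxAttempts) {
    var res = null;
    for (var i = 0; i < maxAttempts; i++) {
        // 'split' with 'middle' is the manual form of the autosplitter's request.
        res = db.adminCommand({split: ns, middle: middle});
        if (res.ok) {
            return res;
        }
        print("split of " + ns + " busy (attempt " + (i + 1) + "): " + res.errmsg);
        sleep(100);  // back off before retrying, like the autosplitter's next pass
    }
    return res;
}
splitWithRetry("test.lowcard", {k: 10}, 5);  // hypothetical namespace and split point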
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.289-0400 m31100| 2015-07-09T14:15:44.778-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.290-0400 m31100| 2015-07-09T14:15:44.778-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.290-0400 m31100| 2015-07-09T14:15:44.778-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.290-0400 m31100| 2015-07-09T14:15:44.778-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.290-0400 m30999| 2015-07-09T14:15:45.192-0400 I NETWORK [conn393] end connection 127.0.0.1:63892 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.291-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.292-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.292-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.293-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.293-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.293-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.294-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.294-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.294-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.295-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.295-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.295-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.296-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.296-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.296-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.297-0400 m31100| 2015-07-09T14:15:44.779-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.301-0400 m31100| 2015-07-09T14:15:44.781-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.302-0400 m31100| 2015-07-09T14:15:44.783-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.302-0400 m31100| 2015-07-09T14:15:44.783-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
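The "possible low cardinality key detected" lines that dominate this stretch come from the split point lookup itself: the chunk holds only the shard key values 0 through 19, many documents share each value, and a chunk boundary can never fall inside a run of identical shard key values, so the lookup flags every candidate value as low cardinality. A short mongo shell sketch that would provoke the same warning pattern, assuming a fresh sharded cluster reached through mongos; the lowcardtest database and the short key name k are stand-ins for the test's generated names:

// Shard on a deliberately low-cardinality key: only 20 distinct values.
var dbName = "lowcardtest";                      // hypothetical database name
var ns = dbName + ".coll";
sh.enableSharding(dbName);
sh.shardCollection(ns, {k: 1});                  // 'k' stands in for the long field name
var coll = db.getSiblingDB(dbName).coll;
var bulk = coll.initializeUnorderedBulkOp();
for (var i = 0; i < 20000; i++) {
    // i % 20 reproduces the values 0..19 seen in the warnings above.
    bulk.insert({k: i % 20, pad: new Array(128).join("x")});
}
bulk.execute();
// As chunks cross the split threshold, the shard's split point lookup
// warns about each repeated key value, exactly as logged above.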
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.303-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.303-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.303-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.304-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.304-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.305-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.305-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.306-0400 m31100| 2015-07-09T14:15:44.784-0400 I SHARDING [conn37] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.306-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.307-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.309-0400 m31100| 2015-07-09T14:15:44.784-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.309-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.309-0400 m31100| 
2015-07-09T14:15:44.785-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.310-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.311-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.311-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.311-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.312-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.312-0400 m30999| 2015-07-09T14:15:45.207-0400 I NETWORK [conn389] end connection 127.0.0.1:63887 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.312-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.313-0400 m31100| 2015-07-09T14:15:44.785-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.313-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.313-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.313-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.313-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.313-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.314-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.314-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.314-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.314-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.315-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.315-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.315-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.315-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.315-0400 m31100| 2015-07-09T14:15:44.786-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.316-0400 m31100| 2015-07-09T14:15:44.787-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.316-0400 m31100| 2015-07-09T14:15:44.787-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - 
key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.316-0400 m31100| 2015-07-09T14:15:44.787-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.316-0400 m31100| 2015-07-09T14:15:44.787-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.316-0400 m31100| 2015-07-09T14:15:44.787-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.316-0400 m31100| 2015-07-09T14:15:44.788-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.317-0400 m31100| 2015-07-09T14:15:44.788-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.318-0400 m31100| 2015-07-09T14:15:44.788-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.318-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.319-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.319-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.319-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.319-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.319-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.319-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.320-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.320-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.320-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.320-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.320-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.321-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.322-0400 m31100| 2015-07-09T14:15:44.789-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.322-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.323-0400 m31100| 2015-07-09T14:15:44.789-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.323-0400 m31100| 2015-07-09T14:15:44.790-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.323-0400 m31100| 2015-07-09T14:15:44.790-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.323-0400 m31100| 2015-07-09T14:15:44.790-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.323-0400 m31100| 2015-07-09T14:15:44.790-0400 W SHARDING [conn38] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.324-0400 m31100| 2015-07-09T14:15:44.790-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.324-0400 m31100| 2015-07-09T14:15:44.790-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.324-0400 m31100| 2015-07-09T14:15:44.791-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.324-0400 m31100| 2015-07-09T14:15:44.791-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.324-0400 m31100| 2015-07-09T14:15:44.791-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.325-0400 m31100| 2015-07-09T14:15:44.791-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. 
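(Annotation, not part of the test output.) The repeated "could not acquire collection lock" warnings above are benign contention: conn15, conn34, conn37 and conn38 all propose the same split of [{ : MinKey }, { : MaxKey }) at once, and only one request at a time can hold the distributed lock for that chunk; the losers simply drop the request and a later insert triggers a fresh attempt. A minimal sketch of the equivalent retry pattern for a manual splitter, assuming a mongos at localhost:30999 (the m30999 process in this log); the function name and retry policy are illustrative, only the public "split" command is real:

// Illustrative sketch only -- not part of this test run.
function splitWithRetry(ns, middleDoc, maxAttempts) {
    var admin = new Mongo("localhost:30999").getDB("admin");
    for (var attempt = 1; attempt <= maxAttempts; attempt++) {
        var res = admin.runCommand({ split: ns, middle: middleDoc });
        if (res.ok) {
            return res; // this attempt won the lock and performed the split
        }
        // Lock contention is transient: the current holder finishes within
        // a few hundred ms (see the ~277ms splitChunk timings later on).
        sleep(100 * attempt);
    }
    throw new Error("could not acquire the collection lock for " + ns);
}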
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.325-0400 m31100| 2015-07-09T14:15:44.792-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.325-0400 m31100| 2015-07-09T14:15:44.792-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.325-0400 m31100| 2015-07-09T14:15:44.793-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.325-0400 m31100| 2015-07-09T14:15:44.793-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.325-0400 m31100| 2015-07-09T14:15:44.793-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.326-0400 m31100| 2015-07-09T14:15:44.793-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.326-0400 m31100| 2015-07-09T14:15:44.793-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.326-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.326-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.327-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.327-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.327-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 
16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.327-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.327-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.329-0400 m31100| 2015-07-09T14:15:44.794-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.330-0400 m31100| 2015-07-09T14:15:44.794-0400 W SHARDING [conn15] possible low cardinality key detected in 
db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.330-0400 m31100| 2015-07-09T14:15:44.797-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.332-0400 m31100| 2015-07-09T14:15:44.799-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.333-0400 m30999| 2015-07-09T14:15:45.250-0400 I NETWORK [conn391] end connection 127.0.0.1:63889 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.333-0400 m31100| 2015-07-09T14:15:44.801-0400 I SHARDING [conn37] request split points 
lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.333-0400 m31100| 2015-07-09T14:15:44.801-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.334-0400 m31100| 2015-07-09T14:15:44.802-0400 I SHARDING [conn34] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.334-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.334-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.334-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.335-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.335-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.335-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.336-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.336-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.336-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.337-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.337-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.337-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.339-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.339-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.339-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.339-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.339-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.340-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.340-0400 m31100| 2015-07-09T14:15:44.803-0400 W SHARDING [conn37] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.340-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.340-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.341-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 
2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.341-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.341-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.341-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.341-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.342-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.342-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.342-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.342-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.342-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.343-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.343-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.343-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.343-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.343-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.344-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.344-0400 m31100| 2015-07-09T14:15:44.804-0400 W SHARDING [conn34] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.345-0400 m31100| 2015-07-09T14:15:44.805-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.346-0400 m31100| 2015-07-09T14:15:44.806-0400 I SHARDING [conn15] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.347-0400 m31100| 2015-07-09T14:15:44.806-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } 
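(Annotation, not part of the test output.) Each splitChunk request above carries the same 19 proposed split points over shard key values 0 and 2 through 19 (1.0 is absorbed into the first chunk by the splitVector pass). A sketch of how the same boundaries could be requested through the public mongos API rather than the internal splitChunk command; the mongos address is this test's, everything else is illustrative:

// Illustrative sketch only -- not part of this test run.
var mongos = new Mongo("localhost:30999");
// The workload's shard key field: 'indexed_insert_long_fieldname_' padded
// with 70 'x' characters (100 characters total, matching the log lines).
var field = "indexed_insert_long_fieldname_" + new Array(71).join("x");
var splitKeys = [0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19];
splitKeys.forEach(function(v) {
    var middle = {};
    middle[field] = v;
    var res = mongos.getDB("admin").runCommand({ split: "db60.coll60", middle: middle });
    // Splitting at an already-existing boundary fails; that is expected when
    // an earlier auto-split has placed this point first.
    print(v + ": " + tojson(res));
});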
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.347-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.347-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.347-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.348-0400 jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js: Workload completed in 1078 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.349-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.350-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.350-0400 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.348-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.348-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.348-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.348-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.349-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.349-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.349-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.349-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.349-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.350-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.350-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.350-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.350-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.351-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.351-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn34] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.351-0400 m31100| 2015-07-09T14:15:44.808-0400 W SHARDING [conn37] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.351-0400 m31100| 2015-07-09T14:15:44.810-0400 I SHARDING [conn38] request split points lookup for chunk db60.coll60 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.351-0400 m31100| 2015-07-09T14:15:44.839-0400 I SHARDING [conn40] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' unlocked. 
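(Annotation, not part of the test output.) The flood of "possible low cardinality key" warnings is inherent to this workload: the shard key takes only the 20 distinct values 0.0 through 19.0, so every candidate split point the splitVector pass computes lands on a value shared by many documents, and mongod flags each one. A minimal repro sketch under the same assumptions as above (mongos at localhost:30999, a small max chunk size so auto-splitting triggers; the insert count is arbitrary):

// Illustrative repro sketch only -- not part of this test run.
var mongos = new Mongo("localhost:30999");
var admin = mongos.getDB("admin");
var field = "indexed_insert_long_fieldname_" + new Array(71).join("x"); // 100-char field name
var key = {};
key[field] = 1;
assert.commandWorked(admin.runCommand({ enableSharding: "db60" }));
mongos.getDB("db60").coll60.ensureIndex(key);
assert.commandWorked(admin.runCommand({ shardCollection: "db60.coll60", key: key }));
// Only 20 distinct shard key values: every split point splitVector proposes
// coincides with one of them, producing the warnings seen in this log.
var bulk = mongos.getDB("db60").coll60.initializeUnorderedBulkOp();
for (var i = 0; i < 20000; i++) {
    var doc = {};
    doc[field] = i % 20;
    bulk.insert(doc);
}
assert.writeOK(bulk.execute());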
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.352-0400 m31100| 2015-07-09T14:15:45.083-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.353-0400 m31100| 2015-07-09T14:15:45.083-0400 I COMMAND [conn34] command admin.$cmd command: splitChunk { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:292 locks:{} protocol:op_command 277ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.353-0400 m30999| 2015-07-09T14:15:45.277-0400 I COMMAND [conn1] DROP: db60.coll60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.355-0400 m31100| 2015-07-09T14:15:45.083-0400 I COMMAND [conn37] command admin.$cmd command: splitChunk { splitChunk: "db60.coll60", keyPattern: { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:292 locks:{} protocol:op_command 278ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.355-0400 m30999| 2015-07-09T14:15:45.277-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:45.277-0400-559eba51ca4787b9985d1e70", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465745277), what: "dropCollection.start", ns: "db60.coll60", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.355-0400 m31100| 2015-07-09T14:15:45.083-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.356-0400 m31100| 2015-07-09T14:15:45.083-0400 I COMMAND [conn40] command db60.coll60 command: splitChunk { splitChunk: 
"db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 16292 } } } protocol:op_command 578ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.356-0400 m31100| 2015-07-09T14:15:45.084-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.356-0400 m31100| 2015-07-09T14:15:45.084-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.357-0400 m31100| 2015-07-09T14:15:45.084-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.357-0400 m31100| 2015-07-09T14:15:45.084-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.357-0400 m31100| 2015-07-09T14:15:45.084-0400 W SHARDING [conn15] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.357-0400 m31100| 2015-07-09T14:15:45.084-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.357-0400 m31100| 2015-07-09T14:15:45.085-0400 W SHARDING [conn15] Finding the split vector for db60.coll60 over { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 } keyCount: 3 numSplits: 19 lookedAt: 9 took 279ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.358-0400 m31100| 2015-07-09T14:15:45.085-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.358-0400 m31100| 2015-07-09T14:15:45.085-0400 W SHARDING [conn38] 
possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.358-0400 m31100| 2015-07-09T14:15:45.085-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.358-0400 m31100| 2015-07-09T14:15:45.085-0400 I COMMAND [conn15] command db60.coll60 command: splitVector { splitVector: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, maxChunkSizeBytes: 1024, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:2365 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_command 279ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.359-0400 m31100| 2015-07-09T14:15:45.085-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.359-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.359-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.359-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.359-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.360-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.360-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.360-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key 
detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.360-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.360-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.361-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.361-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.361-0400 m31100| 2015-07-09T14:15:45.086-0400 W SHARDING [conn38] possible low cardinality key detected in db60.coll60 - key is { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.361-0400 m31100| 2015-07-09T14:15:45.086-0400 I COMMAND [conn38] command db60.coll60 command: splitVector { splitVector: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, maxChunkSizeBytes: 1024, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:2365 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, Collection: { acquireCount: { r: 4 } } } protocol:op_command 276ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.363-0400 m31100| 2015-07-09T14:15:45.087-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.363-0400 m31100| 2015-07-09T14:15:45.089-0400 I SHARDING [conn38] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba51792e00bb67274a67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.363-0400 m31100| 2015-07-09T14:15:45.089-0400 I SHARDING [conn38] remotely refreshing metadata for db60.coll60 based on current shard version 1|3||559eba4fca4787b9985d1e6e, current metadata version is 1|3||559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.364-0400 m31100| 2015-07-09T14:15:45.091-0400 I SHARDING [conn38] metadata of collection db60.coll60 already up to date (shard version : 1|3||559eba4fca4787b9985d1e6e, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.364-0400 m31100| 2015-07-09T14:15:45.091-0400 W SHARDING [conn38] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.365-0400 m31100| 2015-07-09T14:15:45.091-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db60.coll60", keyPattern: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 1.0 }, min: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MinKey }, max: { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 0.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 2.0 }, { 
indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 3.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 4.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 5.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 6.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 7.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 8.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 9.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 10.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 11.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 12.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 13.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 14.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 15.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 16.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 17.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 18.0 }, { indexed_insert_long_fieldname_xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba4fca4787b9985d1e6e') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.365-0400 m31100| 2015-07-09T14:15:45.093-0400 W SHARDING [conn15] could not acquire collection lock for db60.coll60 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db60.coll60 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m31100| 2015-07-09T14:15:45.099-0400 I SHARDING [conn38] distributed lock 'db60.coll60/bs-osx108-8:31100:1436464536:197041335' unlocked. 
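The long run of W SHARDING lines above is the auto-splitter at work on db60.coll60: splitVector walks the { indexed_insert_long_fieldname_...: 1.0 } index for candidate split points, flags shard-key values that repeat too often to separate cleanly ("possible low cardinality key"), and then conn15 and conn38 race to submit overlapping splitChunk requests for the same [{ : MinKey },{ : MaxKey }) chunk, so conn15 loses the per-chunk lock and conn38 finds the boundaries already stale. The same split can be requested by hand through one of this cluster's routers; a minimal sketch, assuming the m30999 mongos and abbreviating the long shard-key field name:

    // Hedged sketch: ask mongos to split db60.coll60 at one of the values the
    // auto-splitter proposed. FIELD abbreviates the real
    // indexed_insert_long_fieldname_xxx... key and is illustrative only.
    var admin = new Mongo('localhost:30999').getDB('admin');
    var FIELD = 'indexed_insert_long_fieldname_...'; // truncated for readability
    var middle = {};
    middle[FIELD] = 3.0; // one of the splitKeys shown above
    printjson(admin.runCommand({ split: 'db60.coll60', middle: middle }));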
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m30999| 2015-07-09T14:15:45.333-0400 I SHARDING [conn1] distributed lock 'db60.coll60/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba51ca4787b9985d1e71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m31100| 2015-07-09T14:15:45.334-0400 I COMMAND [conn38] CMD: drop db60.coll60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m31200| 2015-07-09T14:15:45.337-0400 I COMMAND [conn63] CMD: drop db60.coll60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m31102| 2015-07-09T14:15:45.338-0400 I COMMAND [repl writer worker 5] CMD: drop db60.coll60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m31101| 2015-07-09T14:15:45.338-0400 I COMMAND [repl writer worker 2] CMD: drop db60.coll60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m31202| 2015-07-09T14:15:45.341-0400 I COMMAND [repl writer worker 11] CMD: drop db60.coll60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.366-0400 m31201| 2015-07-09T14:15:45.341-0400 I COMMAND [repl writer worker 0] CMD: drop db60.coll60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.393-0400 m31100| 2015-07-09T14:15:45.392-0400 I SHARDING [conn38] remotely refreshing metadata for db60.coll60 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eba4fca4787b9985d1e6e, current metadata version is 1|3||559eba4fca4787b9985d1e6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.395-0400 m31100| 2015-07-09T14:15:45.394-0400 W SHARDING [conn38] no chunks found when reloading db60.coll60, previous version was 0|0||559eba4fca4787b9985d1e6e, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.395-0400 m31100| 2015-07-09T14:15:45.394-0400 I SHARDING [conn38] dropping metadata for db60.coll60 at shard version 1|3||559eba4fca4787b9985d1e6e, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.397-0400 m30999| 2015-07-09T14:15:45.397-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:45.397-0400-559eba51ca4787b9985d1e72", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465745397), what: "dropCollection", ns: "db60.coll60", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.452-0400 m30999| 2015-07-09T14:15:45.451-0400 I SHARDING [conn1] distributed lock 'db60.coll60/bs-osx108-8:30999:1436464534:16807' unlocked. 
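The sequence above is the coordinated teardown of the sharded collection: the mongos holds the db60.coll60 distributed lock, each shard primary runs CMD: drop, the repl writer workers replay the drop on the secondaries, the shard discards its cached chunk metadata ("no chunks found ... this is a drop"), and a dropCollection event is written to the config changelog. From a shell the whole cascade is a single drop through the router; a sketch, again assuming the m30999 mongos:

    // Drop via the router, then read back the changelog event it recorded,
    // matching the "about to log metadata event" line above.
    var mongos = new Mongo('localhost:30999');
    mongos.getDB('db60').getCollection('coll60').drop();
    mongos.getDB('config').changelog
          .find({ what: 'dropCollection', ns: 'db60.coll60' })
          .sort({ time: -1 }).limit(1).forEach(printjson);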
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.509-0400 m30999| 2015-07-09T14:15:45.508-0400 I COMMAND [conn1] DROP DATABASE: db60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.509-0400 m30999| 2015-07-09T14:15:45.509-0400 I SHARDING [conn1] DBConfig::dropDatabase: db60 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.509-0400 m30999| 2015-07-09T14:15:45.509-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:45.509-0400-559eba51ca4787b9985d1e73", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465745509), what: "dropDatabase.start", ns: "db60", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.616-0400 m30999| 2015-07-09T14:15:45.616-0400 I SHARDING [conn1] DBConfig::dropDatabase: db60 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.617-0400 m31100| 2015-07-09T14:15:45.616-0400 I COMMAND [conn157] dropDatabase db60 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.617-0400 m31100| 2015-07-09T14:15:45.616-0400 I COMMAND [conn157] dropDatabase db60 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.618-0400 m30999| 2015-07-09T14:15:45.617-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:45.617-0400-559eba51ca4787b9985d1e74", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465745617), what: "dropDatabase", ns: "db60", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.618-0400 m31101| 2015-07-09T14:15:45.618-0400 I COMMAND [repl writer worker 6] dropDatabase db60 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.618-0400 m31101| 2015-07-09T14:15:45.618-0400 I COMMAND [repl writer worker 6] dropDatabase db60 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.618-0400 m31102| 2015-07-09T14:15:45.618-0400 I COMMAND [repl writer worker 0] dropDatabase db60 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.619-0400 m31102| 2015-07-09T14:15:45.618-0400 I COMMAND [repl writer worker 0] dropDatabase db60 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.704-0400 m31100| 2015-07-09T14:15:45.704-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.708-0400 m31101| 2015-07-09T14:15:45.707-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.708-0400 m31102| 2015-07-09T14:15:45.707-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.746-0400 m31200| 2015-07-09T14:15:45.746-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.749-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.749-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.749-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.749-0400 jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.749-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.749-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.750-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.750-0400 m31201| 2015-07-09T14:15:45.749-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:45.750-0400 m31202| 2015-07-09T14:15:45.749-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.757-0400 m30999| 2015-07-09T14:15:45.756-0400 I SHARDING [conn1] distributed lock 'db61/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba51ca4787b9985d1e75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.760-0400 m30999| 2015-07-09T14:15:45.760-0400 I SHARDING [conn1] Placing [db61] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.761-0400 m30999| 2015-07-09T14:15:45.760-0400 I SHARDING [conn1] Enabling sharding for database [db61] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:45.814-0400 m30999| 2015-07-09T14:15:45.814-0400 I SHARDING [conn1] distributed lock 'db61/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.123-0400 m31100| 2015-07-09T14:15:45.837-0400 I INDEX [conn144] build index on: db61.coll61 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db61.coll61" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.123-0400 m31100| 2015-07-09T14:15:45.837-0400 I INDEX [conn144] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.123-0400 m31100| 2015-07-09T14:15:45.847-0400 I INDEX [conn144] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.123-0400 m30999| 2015-07-09T14:15:45.848-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db61.coll61", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.123-0400 m30999| 2015-07-09T14:15:45.852-0400 I SHARDING [conn1] distributed lock 'db61.coll61/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba51ca4787b9985d1e76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.123-0400 m30999| 2015-07-09T14:15:45.853-0400 I SHARDING [conn1] enable sharding on: db61.coll61 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.124-0400 m30999| 2015-07-09T14:15:45.853-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:45.853-0400-559eba51ca4787b9985d1e77", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465745853), what: "shardCollection.start", ns: "db61.coll61", details: { shardKey: { _id: "hashed" }, collection: "db61.coll61", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.124-0400 m31101| 2015-07-09T14:15:45.858-0400 I INDEX [repl writer worker 15] build index on: db61.coll61 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db61.coll61" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.124-0400 m31101| 2015-07-09T14:15:45.858-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.124-0400 m31102| 2015-07-09T14:15:45.861-0400 I INDEX [repl writer worker 2] build index on: db61.coll61 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db61.coll61" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.124-0400 m31102| 2015-07-09T14:15:45.862-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.124-0400 m31101| 2015-07-09T14:15:45.864-0400 I INDEX [repl writer 
worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.124-0400 m31102| 2015-07-09T14:15:45.868-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.125-0400 m30999| 2015-07-09T14:15:45.906-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db61.coll61 using new epoch 559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.125-0400 m30999| 2015-07-09T14:15:46.015-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db61.coll61: 1ms sequenceNumber: 268 version: 1|1||559eba51ca4787b9985d1e78 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.125-0400 m30999| 2015-07-09T14:15:46.073-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db61.coll61: 0ms sequenceNumber: 269 version: 1|1||559eba51ca4787b9985d1e78 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.125-0400 m31100| 2015-07-09T14:15:46.074-0400 I SHARDING [conn45] remotely refreshing metadata for db61.coll61 with requested shard version 1|1||559eba51ca4787b9985d1e78, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.125-0400 m31100| 2015-07-09T14:15:46.076-0400 I SHARDING [conn45] collection db61.coll61 was previously unsharded, new metadata loaded with shard version 1|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.125-0400 m31100| 2015-07-09T14:15:46.076-0400 I SHARDING [conn45] collection version was loaded at version 1|1||559eba51ca4787b9985d1e78, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.126-0400 m30999| 2015-07-09T14:15:46.076-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:46.076-0400-559eba52ca4787b9985d1e79", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465746076), what: "shardCollection", ns: "db61.coll61", details: { version: "1|1||559eba51ca4787b9985d1e78" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.131-0400 m30999| 2015-07-09T14:15:46.131-0400 I SHARDING [conn1] distributed lock 'db61.coll61/bs-osx108-8:30999:1436464534:16807' unlocked. 
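With db60 gone, the harness rebuilds its fixture for the next workload: it places db61 on test-rs0, enables sharding for the database, builds the _id_hashed index on each replica-set member, and shards coll61 on { _id: "hashed" }, creating two initial chunks under the new epoch 559eba51ca4787b9985d1e78 (the moveChunk that follows pushes one of them to test-rs1). A sketch of the equivalent shell setup, assuming the m30999 mongos:

    // Hashed sharding creates its initial chunks inside shardCollection,
    // so no manual pre-split is needed for this fixture.
    var admin = new Mongo('localhost:30999').getDB('admin');
    assert.commandWorked(admin.runCommand({ enableSharding: 'db61' }));
    assert.commandWorked(admin.runCommand({
        shardCollection: 'db61.coll61',
        key: { _id: 'hashed' }
    }));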
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.132-0400 m30999| 2015-07-09T14:15:46.132-0400 I SHARDING [conn1] moving chunk ns: db61.coll61 moving ( ns: db61.coll61, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.132-0400 m31100| 2015-07-09T14:15:46.132-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.134-0400 m31100| 2015-07-09T14:15:46.133-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba51ca4787b9985d1e78') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.137-0400 m31100| 2015-07-09T14:15:46.136-0400 I SHARDING [conn38] distributed lock 'db61.coll61/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba52792e00bb67274a69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.137-0400 m31100| 2015-07-09T14:15:46.137-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:46.137-0400-559eba52792e00bb67274a6a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465746137), what: "moveChunk.start", ns: "db61.coll61", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.190-0400 m31100| 2015-07-09T14:15:46.190-0400 I SHARDING [conn38] remotely refreshing metadata for db61.coll61 based on current shard version 1|1||559eba51ca4787b9985d1e78, current metadata version is 1|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.192-0400 m31100| 2015-07-09T14:15:46.191-0400 I SHARDING [conn38] metadata of collection db61.coll61 already up to date (shard version : 1|1||559eba51ca4787b9985d1e78, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.192-0400 m31100| 2015-07-09T14:15:46.191-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.192-0400 m31100| 2015-07-09T14:15:46.192-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.193-0400 m31200| 2015-07-09T14:15:46.192-0400 I SHARDING [conn16] remotely refreshing metadata for db61.coll61, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.194-0400 m31200| 2015-07-09T14:15:46.194-0400 I SHARDING [conn16] collection db61.coll61 was previously unsharded, new metadata loaded with shard version 0|0||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.195-0400 m31200| 2015-07-09T14:15:46.194-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba51ca4787b9985d1e78, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.195-0400 m31200| 2015-07-09T14:15:46.194-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db61.coll61 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.197-0400 m31100| 2015-07-09T14:15:46.196-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.201-0400 m31100| 2015-07-09T14:15:46.200-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.206-0400 m31100| 2015-07-09T14:15:46.206-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.210-0400 m31200| 2015-07-09T14:15:46.209-0400 I INDEX [migrateThread] build index on: db61.coll61 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db61.coll61" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.210-0400 m31200| 2015-07-09T14:15:46.209-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.215-0400 m31100| 2015-07-09T14:15:46.215-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.222-0400 m31200| 2015-07-09T14:15:46.222-0400 I INDEX [migrateThread] build index on: db61.coll61 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db61.coll61" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.222-0400 m31200| 2015-07-09T14:15:46.222-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.232-0400 m31100| 2015-07-09T14:15:46.231-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.234-0400 m31200| 2015-07-09T14:15:46.233-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.234-0400 m31200| 2015-07-09T14:15:46.234-0400 I SHARDING [migrateThread] Deleter starting delete for: db61.coll61 from { _id: 0 } -> { _id: MaxKey }, with opId: 92017 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.234-0400 m31200| 2015-07-09T14:15:46.234-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db61.coll61 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.243-0400 m31202| 2015-07-09T14:15:46.242-0400 I INDEX [repl writer worker 6] build index on: db61.coll61 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db61.coll61" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.244-0400 m31202| 2015-07-09T14:15:46.242-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.244-0400 m31201| 2015-07-09T14:15:46.244-0400 I INDEX [repl writer worker 7] build index on: db61.coll61 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db61.coll61" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.244-0400 m31201| 2015-07-09T14:15:46.244-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.245-0400 m31202| 2015-07-09T14:15:46.245-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.247-0400 m31200| 2015-07-09T14:15:46.247-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.247-0400 m31200| 2015-07-09T14:15:46.247-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db61.coll61' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.250-0400 m31201| 2015-07-09T14:15:46.250-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.266-0400 m31100| 2015-07-09T14:15:46.265-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.266-0400 m31100| 2015-07-09T14:15:46.266-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.267-0400 m31100| 2015-07-09T14:15:46.266-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.267-0400 m31100| 2015-07-09T14:15:46.266-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.270-0400 m31200| 2015-07-09T14:15:46.270-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db61.coll61' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.271-0400 m31200| 2015-07-09T14:15:46.270-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:46.270-0400-559eba52d5a107a5b9c0db64", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465746270), what: "moveChunk.to", ns: "db61.coll61", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 39, step 2 of 5: 12, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.325-0400 m31100| 2015-07-09T14:15:46.324-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.325-0400 m31100| 2015-07-09T14:15:46.324-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559eba51ca4787b9985d1e78 through { _id: MinKey } -> { _id: 0 } for collection 'db61.coll61' [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.327-0400 m31100| 2015-07-09T14:15:46.327-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:46.327-0400-559eba52792e00bb67274a6b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465746327), what: "moveChunk.commit", ns: "db61.coll61", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.380-0400 m31100| 2015-07-09T14:15:46.380-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.380-0400 m31100| 2015-07-09T14:15:46.380-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.380-0400 m31100| 2015-07-09T14:15:46.380-0400 I SHARDING [conn38] Deleter starting delete for: db61.coll61 from { _id: 0 } -> { _id: MaxKey }, with opId: 186979 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:46.381-0400 m31100| 2015-07-09T14:15:46.380-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db61.coll61 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.381-0400 m31100| 2015-07-09T14:15:46.380-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.382-0400 m31100| 2015-07-09T14:15:46.381-0400 I SHARDING [conn38] distributed lock 'db61.coll61/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.382-0400 m31100| 2015-07-09T14:15:46.381-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:46.381-0400-559eba52792e00bb67274a6c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465746381), what: "moveChunk.from", ns: "db61.coll61", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.436-0400 m31100| 2015-07-09T14:15:46.435-0400 I COMMAND [conn38] command db61.coll61 command: moveChunk { moveChunk: "db61.coll61", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba51ca4787b9985d1e78') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.438-0400 m30999| 2015-07-09T14:15:46.437-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db61.coll61: 0ms sequenceNumber: 270 version: 2|1||559eba51ca4787b9985d1e78 based on: 1|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.439-0400 m31100| 2015-07-09T14:15:46.438-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db61.coll61", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba51ca4787b9985d1e78') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.443-0400 m31100| 2015-07-09T14:15:46.443-0400 I SHARDING [conn38] distributed lock 'db61.coll61/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba52792e00bb67274a6d [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.443-0400 m31100| 2015-07-09T14:15:46.443-0400 I SHARDING [conn38] remotely refreshing metadata for db61.coll61 based on current shard version 2|0||559eba51ca4787b9985d1e78, current metadata version is 2|0||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.445-0400 m31100| 2015-07-09T14:15:46.445-0400 I SHARDING [conn38] updating metadata for db61.coll61 from shard version 2|0||559eba51ca4787b9985d1e78 to shard version 2|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.445-0400 m31100| 2015-07-09T14:15:46.445-0400 I 
SHARDING [conn38] collection version was loaded at version 2|1||559eba51ca4787b9985d1e78, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.446-0400 m31100| 2015-07-09T14:15:46.445-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.448-0400 m31100| 2015-07-09T14:15:46.447-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:46.447-0400-559eba52792e00bb67274a6e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465746447), what: "split", ns: "db61.coll61", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba51ca4787b9985d1e78') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba51ca4787b9985d1e78') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.502-0400 m31100| 2015-07-09T14:15:46.502-0400 I SHARDING [conn38] distributed lock 'db61.coll61/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.504-0400 m30999| 2015-07-09T14:15:46.504-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db61.coll61: 0ms sequenceNumber: 271 version: 2|3||559eba51ca4787b9985d1e78 based on: 2|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.505-0400 m31200| 2015-07-09T14:15:46.504-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db61.coll61", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba51ca4787b9985d1e78') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.509-0400 m31200| 2015-07-09T14:15:46.508-0400 I SHARDING [conn63] distributed lock 'db61.coll61/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba52d5a107a5b9c0db65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.509-0400 m31200| 2015-07-09T14:15:46.508-0400 I SHARDING [conn63] remotely refreshing metadata for db61.coll61 based on current shard version 0|0||559eba51ca4787b9985d1e78, current metadata version is 1|1||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.510-0400 m31200| 2015-07-09T14:15:46.510-0400 I SHARDING [conn63] updating metadata for db61.coll61 from shard version 0|0||559eba51ca4787b9985d1e78 to shard version 2|0||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.510-0400 m31200| 2015-07-09T14:15:46.510-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eba51ca4787b9985d1e78, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.511-0400 m31200| 2015-07-09T14:15:46.510-0400 I SHARDING [conn63] splitChunk accepted at version 2|0||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.512-0400 m31200| 2015-07-09T14:15:46.511-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:46.511-0400-559eba52d5a107a5b9c0db66", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436465746511), what: "split", ns: "db61.coll61", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559eba51ca4787b9985d1e78') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba51ca4787b9985d1e78') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.566-0400 m31200| 2015-07-09T14:15:46.565-0400 I SHARDING [conn63] distributed lock 'db61.coll61/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.568-0400 m30999| 2015-07-09T14:15:46.567-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db61.coll61: 0ms sequenceNumber: 272 version: 2|5||559eba51ca4787b9985d1e78 based on: 2|3||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.826-0400 m31100| 2015-07-09T14:15:46.825-0400 I COMMAND [conn144] command db61.$cmd command: insert { insert: "coll61", documents: 500, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eba51ca4787b9985d1e78') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 509, w: 509 } }, Database: { acquireCount: { w: 509 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 500 } }, oplog: { acquireCount: { w: 500 } } } protocol:op_command 184ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:46.832-0400 m31200| 2015-07-09T14:15:46.831-0400 I COMMAND [conn144] command db61.$cmd command: insert { insert: "coll61", documents: 500, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eba51ca4787b9985d1e78') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 510, w: 510 } }, Database: { acquireCount: { w: 510 } }, Collection: { acquireCount: { w: 10 } }, Metadata: { acquireCount: { w: 500 } }, oplog: { acquireCount: { w: 500 } } } protocol:op_command 190ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.014-0400 m31200| 2015-07-09T14:15:47.014-0400 I COMMAND [conn144] command db61.$cmd command: insert { insert: "coll61", documents: 517, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eba51ca4787b9985d1e78') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 525, w: 525 } }, Database: { acquireCount: { w: 525 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 517 } }, oplog: { acquireCount: { w: 517 } } } protocol:op_command 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.015-0400 m31100| 2015-07-09T14:15:47.014-0400 I COMMAND [conn144] command db61.$cmd command: insert { insert: "coll61", documents: 483, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eba51ca4787b9985d1e78') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 492, w: 492 } }, Database: { acquireCount: { w: 492 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 483 } }, oplog: { acquireCount: { w: 483 } } } protocol:op_command 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.027-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.102-0400 m30999| 2015-07-09T14:15:47.102-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63897 #395 (2 connections now open) 
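That block is one complete chunk migration plus its follow-up splits: the donor test-rs0 polls the recipient through the "ready" and "steady" states, enters the critical section, commits at the bumped version 2|0, and, because the request carried waitForDelete: true, runs the rangeDeleter inline before releasing the distributed lock; each shard then splits its remaining chunk at a hashed-key midpoint near +/-2^62, after which the two 500-document inserts route roughly half the keys to each shard. The same migration can be requested from a shell; a sketch, assuming the m30999 mongos (with a hashed shard key, find hashes the supplied _id value to locate the chunk):

    // Ask the router to move the chunk that owns hash(_id: 0) to test-rs1.
    var admin = new Mongo('localhost:30999').getDB('admin');
    printjson(admin.runCommand({
        moveChunk: 'db61.coll61',
        find: { _id: 0 },
        to: 'test-rs1'
    }));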
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.104-0400 m30999| 2015-07-09T14:15:47.103-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63898 #396 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.110-0400 m30998| 2015-07-09T14:15:47.110-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63899 #394 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.110-0400 m30998| 2015-07-09T14:15:47.110-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63900 #395 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.116-0400 m30998| 2015-07-09T14:15:47.116-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63901 #396 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.123-0400 setting random seed: 8513758112676 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.123-0400 setting random seed: 9217847250401 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.123-0400 setting random seed: 732666384428 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.123-0400 setting random seed: 4358809646219 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.124-0400 setting random seed: 9946404341608 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.127-0400 m30998| 2015-07-09T14:15:47.127-0400 I SHARDING [conn396] ChunkManager: time to load chunks for db61.coll61: 0ms sequenceNumber: 73 version: 2|5||559eba51ca4787b9985d1e78 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.165-0400 m31100| 2015-07-09T14:15:47.165-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.177-0400 m31200| 2015-07-09T14:15:47.177-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.196-0400 m31100| 2015-07-09T14:15:47.195-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_227 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.197-0400 m31100| 2015-07-09T14:15:47.197-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.197-0400 m31100| 2015-07-09T14:15:47.197-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.198-0400 m31100| 2015-07-09T14:15:47.197-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.215-0400 m31200| 2015-07-09T14:15:47.215-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.215-0400 m31200| 2015-07-09T14:15:47.215-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.216-0400 m31200| 2015-07-09T14:15:47.215-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.216-0400 m31200| 2015-07-09T14:15:47.215-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.976-0400 m31200| 2015-07-09T14:15:47.976-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.979-0400 m31200| 2015-07-09T14:15:47.979-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.980-0400 m31200| 2015-07-09T14:15:47.979-0400 I 
COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.984-0400 m31200| 2015-07-09T14:15:47.984-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_180 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.988-0400 m31200| 2015-07-09T14:15:47.987-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.988-0400 m31200| 2015-07-09T14:15:47.988-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465747_64 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.989-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.989-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.989-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.989-0400 m31200| values...., out: "tmp.mrs.coll61_1436465747_64", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:212 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 593 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 1, w: 5, R: 3, W: 9 }, timeAcquiringMicros: { r: 18717, w: 7815, R: 49442, W: 103982 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 860ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.993-0400 m31200| 2015-07-09T14:15:47.993-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.993-0400 m31200| 2015-07-09T14:15:47.993-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.995-0400 m31200| 2015-07-09T14:15:47.995-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_182 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:47.996-0400 m31200| 2015-07-09T14:15:47.995-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.005-0400 m31200| 2015-07-09T14:15:48.005-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.005-0400 m31200| 2015-07-09T14:15:48.005-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.006-0400 m31200| 2015-07-09T14:15:48.006-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465747_73 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.007-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.007-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.007-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.008-0400 m31200| values...., out: "tmp.mrs.coll61_1436465747_73", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:212 locks:{ Global: { acquireCount: { r: 189, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 3758, w: 9690, W: 236 } }, Database: { acquireCount: { r: 27, w: 66, 
R: 29, W: 11 }, acquireWaitCount: { r: 5, w: 6, R: 1, W: 8 }, timeAcquiringMicros: { r: 27026, w: 103915, R: 212, W: 72361 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 862ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.008-0400 m31200| 2015-07-09T14:15:48.008-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_184 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.012-0400 m31200| 2015-07-09T14:15:48.012-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.017-0400 m31200| 2015-07-09T14:15:48.017-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465747_72 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.017-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.017-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.018-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.018-0400 m31200| values...., out: "tmp.mrs.coll61_1436465747_72", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:19 reslen:212 locks:{ Global: { acquireCount: { r: 193, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 9570, W: 1147 } }, Database: { acquireCount: { r: 27, w: 66, R: 31, W: 11 }, acquireWaitCount: { r: 7, w: 4, R: 3, W: 9 }, timeAcquiringMicros: { r: 27577, w: 43115, R: 64351, W: 34168 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 856ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.020-0400 m31200| 2015-07-09T14:15:48.019-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.020-0400 m31200| 2015-07-09T14:15:48.020-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.021-0400 m31200| 2015-07-09T14:15:48.020-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_181 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.028-0400 m31200| 2015-07-09T14:15:48.027-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.036-0400 m31200| 2015-07-09T14:15:48.035-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465747_63 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.036-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.036-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.036-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.037-0400 m31200| values...., out: "tmp.mrs.coll61_1436465747_63", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:212 locks:{ Global: { acquireCount: { r: 187, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 10357, w: 10412, W: 2011 } }, Database: { acquireCount: { r: 27, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 9, w: 9, R: 5, W: 3 }, 
timeAcquiringMicros: { r: 54178, w: 23022, R: 110876, W: 1165 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 905ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.037-0400 m31200| 2015-07-09T14:15:48.036-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.037-0400 m31200| 2015-07-09T14:15:48.036-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.037-0400 m31200| 2015-07-09T14:15:48.037-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_183 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.049-0400 m31200| 2015-07-09T14:15:48.048-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465747_74 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.049-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.049-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.049-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.050-0400 m31200| values...., out: "tmp.mrs.coll61_1436465747_74", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:212 locks:{ Global: { acquireCount: { r: 189, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1 }, timeAcquiringMicros: { r: 24005, w: 4527 } }, Database: { acquireCount: { r: 27, w: 66, R: 29, W: 11 }, acquireWaitCount: { r: 11, w: 11, R: 2, W: 5 }, timeAcquiringMicros: { r: 60268, w: 62198, R: 2760, W: 101990 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 904ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.066-0400 m31100| 2015-07-09T14:15:48.065-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.074-0400 m31100| 2015-07-09T14:15:48.074-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.074-0400 m31100| 2015-07-09T14:15:48.074-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.076-0400 m31100| 2015-07-09T14:15:48.076-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.077-0400 m31100| 2015-07-09T14:15:48.077-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.084-0400 m31100| 2015-07-09T14:15:48.083-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_227 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.085-0400 m31100| 2015-07-09T14:15:48.083-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_227 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.085-0400 m31100| 2015-07-09T14:15:48.084-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_227 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.088-0400 m31100| 2015-07-09T14:15:48.088-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.102-0400 m31100| 2015-07-09T14:15:48.101-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_225 
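
Each of the slow-command entries above records the first pass of a sharded mapReduce: mongos has fanned the map/reduce phase out to every shard, and each shard (m31100 for test-rs0, m31200 for test-rs1) writes its partial result into a per-shard temporary collection (tmp.mrs.coll61_<epoch>_<n>). A minimal sketch of the command a shard receives, assuming illustrative mapper/reducer bodies — the workload's actual functions are truncated in the log and are not reproduced here:

    // Hypothetical map/reduce bodies, for illustration only.
    var mapper = function() { emit(this.key, 1); };
    var reducer = function(key, values) { return Array.sum(values); };
    db.getSiblingDB('db61').runCommand({
        mapreduce: 'coll61',
        map: mapper,
        reduce: reducer,
        out: 'tmp.mrs.coll61_1436465747_64', // per-shard temporary result
        shardedFirstPass: true               // set internally by mongos
    });

Note that shardedFirstPass is an internal flag added by mongos when it distributes the first phase; clients run mapReduce against mongos and never pass it themselves.
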
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.102-0400 m31100| 2015-07-09T14:15:48.102-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.105-0400 m31100| 2015-07-09T14:15:48.105-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.108-0400 m31100| 2015-07-09T14:15:48.108-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.117-0400 m31100| 2015-07-09T14:15:48.116-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.117-0400 m31100| 2015-07-09T14:15:48.117-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.117-0400 m31100| 2015-07-09T14:15:48.117-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.125-0400 m31100| 2015-07-09T14:15:48.125-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.130-0400 m31100| 2015-07-09T14:15:48.129-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.131-0400 m31100| 2015-07-09T14:15:48.130-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.131-0400 m31100| 2015-07-09T14:15:48.130-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465747_63 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.132-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.132-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.132-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.134-0400 m31100| values...., out: "tmp.mrs.coll61_1436465747_63", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:13 reslen:212 locks:{ Global: { acquireCount: { r: 179, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7601, W: 639 } }, Database: { acquireCount: { r: 27, w: 66, R: 24, W: 11 }, acquireWaitCount: { r: 1, w: 11, R: 6, W: 5 }, timeAcquiringMicros: { r: 348, w: 144295, R: 208494, W: 4191 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.134-0400 m31100| 2015-07-09T14:15:48.131-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.160-0400 m31100| 2015-07-09T14:15:48.159-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465747_74 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.160-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.160-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.160-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.160-0400 m31100| values...., out: "tmp.mrs.coll61_1436465747_74", shardedFirstPass: true } 
planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:212 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 9292, W: 734 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 4, w: 4, R: 8, W: 7 }, timeAcquiringMicros: { r: 23670, w: 11544, R: 113845, W: 96217 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1028ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.169-0400 m31100| 2015-07-09T14:15:48.169-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465747_64 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.170-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.170-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.170-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.171-0400 m31100| values...., out: "tmp.mrs.coll61_1436465747_64", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:212 locks:{ Global: { acquireCount: { r: 183, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 16988, W: 170 } }, Database: { acquireCount: { r: 27, w: 66, R: 26, W: 11 }, acquireWaitCount: { r: 5, w: 7, R: 3, W: 9 }, timeAcquiringMicros: { r: 26447, w: 107148, R: 4673, W: 125495 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1041ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.213-0400 m31100| 2015-07-09T14:15:48.212-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465747_72 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.213-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.213-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.213-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.214-0400 m31100| values...., out: "tmp.mrs.coll61_1436465747_72", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:17 reslen:212 locks:{ Global: { acquireCount: { r: 187, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 20229, w: 9788, W: 40 } }, Database: { acquireCount: { r: 27, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 8, w: 8, R: 6, W: 6 }, timeAcquiringMicros: { r: 65978, w: 74367, R: 12472, W: 100508 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1069ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.214-0400 m31100| 2015-07-09T14:15:48.213-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_230 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.215-0400 m31100| 2015-07-09T14:15:48.213-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_232 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.215-0400 m31100| 2015-07-09T14:15:48.214-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_231 
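
The locks section in these entries uses the server's lock-mode shorthand: r and w are intent-shared and intent-exclusive, R and W are full shared and exclusive; acquireWaitCount and timeAcquiringMicros quantify contention at each level (Global, Database, Collection, Metadata, oplog), which is why these first-pass commands are crossing the 1000 ms mark under concurrent load. Outside a test log, the same per-operation detail is available from the database profiler on a shard member; a sketch, with an arbitrary 500 ms threshold:

    // Profile operations slower than 500 ms, then list the slowest recent ones.
    var d = db.getSiblingDB('db61');
    d.setProfilingLevel(1, 500);
    d.system.profile.find({ millis: { $gt: 500 } },
                          { op: 1, ns: 1, millis: 1, locks: 1 })
                    .sort({ ts: -1 });
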
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.215-0400 m31100| 2015-07-09T14:15:48.214-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465747_73 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.215-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.216-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.216-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.216-0400 m31100| values...., out: "tmp.mrs.coll61_1436465747_73", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:212 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 9020, w: 30563 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 5, w: 8, R: 6, W: 5 }, timeAcquiringMicros: { r: 20752, w: 115582, R: 12739, W: 129831 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1072ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.217-0400 m31100| 2015-07-09T14:15:48.214-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_233 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.217-0400 m31100| 2015-07-09T14:15:48.216-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_234 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.227-0400 m31100| 2015-07-09T14:15:48.226-0400 I SHARDING [conn191] ChunkManager: time to load chunks for db61.coll61: 0ms sequenceNumber: 4 version: 2|5||559eba51ca4787b9985d1e78 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.263-0400 m31100| 2015-07-09T14:15:48.262-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63902 #192 (114 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.265-0400 m31200| 2015-07-09T14:15:48.264-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63903 #152 (95 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.282-0400 m31100| 2015-07-09T14:15:48.282-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63904 #193 (115 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.285-0400 m31200| 2015-07-09T14:15:48.285-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63905 #153 (96 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.304-0400 m31100| 2015-07-09T14:15:48.304-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63906 #194 (116 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.307-0400 m31200| 2015-07-09T14:15:48.305-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63907 #154 (97 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.374-0400 m31100| 2015-07-09T14:15:48.374-0400 I COMMAND [conn191] CMD: drop db61.map_reduce_reduce_nonatomic [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.381-0400 m31100| 2015-07-09T14:15:48.381-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_230 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.382-0400 m31100| 2015-07-09T14:15:48.381-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_230 
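
The drop of db61.map_reduce_reduce_nonatomic and the mapreduce.shardedfinish entries that follow are the second phase: the primary shard for db61 (m31100) pulls every shard's tmp.mrs collection and reduces it into the output collection, and out.nonAtomic: true lets that post-processing proceed without holding the database exclusively for the whole merge. The client-side call driving this two-phase traffic would look roughly like the following — the mapper and reducer bodies are illustrative placeholders, while the finalize body and the out specification are exactly as logged:

    db.getSiblingDB('db61').coll61.mapReduce(
        function() { emit(this.key, 1); },                   // hypothetical map
        function(key, values) { return Array.sum(values); }, // hypothetical reduce
        {
            finalize: function(key, reducedValue) { return reducedValue; },
            out: { reduce: 'map_reduce_reduce_nonatomic', nonAtomic: true }
        }
    );
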
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.382-0400 m31100| 2015-07-09T14:15:48.382-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_230 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.384-0400 m31102| 2015-07-09T14:15:48.384-0400 I COMMAND [repl writer worker 11] CMD: drop db61.map_reduce_reduce_nonatomic [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.385-0400 m31100| 2015-07-09T14:15:48.384-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.385-0400 m31101| 2015-07-09T14:15:48.384-0400 I COMMAND [repl writer worker 12] CMD: drop db61.map_reduce_reduce_nonatomic [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.386-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.386-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.386-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.386-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.386-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.387-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465747_63", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465747_63", timeMillis: 945, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|62, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465747_63", timeMillis: 890, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|11, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 56, w: 50, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 439 } }, Database: { acquireCount: { r: 2, w: 45, W: 6 }, acquireWaitCount: { w: 2, W: 3 }, timeAcquiringMicros: { w: 56351, W: 1089 } }, Collection: { acquireCount: { r: 2, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 252ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.387-0400 m31100| 2015-07-09T14:15:48.385-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.448-0400 m31100| 2015-07-09T14:15:48.447-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_231 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.448-0400 m31200| 2015-07-09T14:15:48.447-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.449-0400 m31100| 
2015-07-09T14:15:48.449-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_232 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.452-0400 m31100| 2015-07-09T14:15:48.452-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_231 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.454-0400 m31202| 2015-07-09T14:15:48.454-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.454-0400 m31201| 2015-07-09T14:15:48.454-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.455-0400 m31100| 2015-07-09T14:15:48.454-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_232 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.456-0400 m31100| 2015-07-09T14:15:48.456-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.457-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.457-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.457-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.457-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.457-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.458-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465747_74", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465747_74", timeMillis: 952, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|68, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465747_74", timeMillis: 892, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|25, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { w: 1, W: 20 }, timeAcquiringMicros: { w: 7009, W: 33329 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 3, w: 2, W: 4 }, timeAcquiringMicros: { r: 2697, w: 22100, W: 30059 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 294ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.458-0400 m31100| 2015-07-09T14:15:48.457-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.459-0400 m31101| 2015-07-09T14:15:48.457-0400 I COMMAND [repl writer 
worker 5] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.459-0400 m31200| 2015-07-09T14:15:48.458-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.459-0400 m31100| 2015-07-09T14:15:48.457-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.459-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.459-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.460-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.460-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.461-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.462-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465747_64", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465747_64", timeMillis: 974, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|80, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465747_64", timeMillis: 851, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465747000|103, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 1, W: 20 }, timeAcquiringMicros: { r: 1801, w: 6560, W: 29472 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 3, w: 2, W: 4 }, timeAcquiringMicros: { r: 3552, w: 65865, W: 5896 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 285ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.463-0400 m31200| 2015-07-09T14:15:48.458-0400 I COMMAND [conn85] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.463-0400 m31100| 2015-07-09T14:15:48.459-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.463-0400 m31102| 2015-07-09T14:15:48.460-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465747_63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.463-0400 m31101| 2015-07-09T14:15:48.460-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mr.coll61_231 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.465-0400 m31101| 2015-07-09T14:15:48.464-0400 I COMMAND [repl writer worker 13] CMD: 
drop db61.tmp.mr.coll61_232 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.465-0400 m31102| 2015-07-09T14:15:48.465-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mr.coll61_231 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.468-0400 m31102| 2015-07-09T14:15:48.468-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mr.coll61_232 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.474-0400 m31202| 2015-07-09T14:15:48.473-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.474-0400 m31201| 2015-07-09T14:15:48.473-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.527-0400 m31101| 2015-07-09T14:15:48.527-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.528-0400 m31102| 2015-07-09T14:15:48.527-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mrs.coll61_1436465747_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.528-0400 m31200| 2015-07-09T14:15:48.528-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.530-0400 m31100| 2015-07-09T14:15:48.530-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_235 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.530-0400 m31100| 2015-07-09T14:15:48.530-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_233 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.531-0400 m31100| 2015-07-09T14:15:48.531-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_234 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.536-0400 m31200| 2015-07-09T14:15:48.535-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.536-0400 m31201| 2015-07-09T14:15:48.536-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.536-0400 m31202| 2015-07-09T14:15:48.536-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.538-0400 m31100| 2015-07-09T14:15:48.537-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_233 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.539-0400 m31100| 2015-07-09T14:15:48.538-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_236 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.539-0400 m31101| 2015-07-09T14:15:48.539-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.541-0400 m31102| 2015-07-09T14:15:48.540-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465747_64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.541-0400 m31100| 2015-07-09T14:15:48.540-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_234 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.542-0400 m31101| 2015-07-09T14:15:48.542-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mr.coll61_233 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.542-0400 m31100| 2015-07-09T14:15:48.542-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.542-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.543-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.543-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.543-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.543-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.544-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465747_72", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465747_72", timeMillis: 973, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|88, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465747_72", timeMillis: 845, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|1, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 2, W: 20 }, timeAcquiringMicros: { r: 1276, w: 66338, W: 35877 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 4, w: 2, W: 4 }, timeAcquiringMicros: { r: 9350, w: 22777, W: 14572 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 327ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.545-0400 m31100| 2015-07-09T14:15:48.542-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.545-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.545-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.545-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.545-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.545-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.547-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465747_73", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465747_73", timeMillis: 989, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465748000|105, 
electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465747_73", timeMillis: 849, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465747000|122, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { w: 2, W: 20 }, timeAcquiringMicros: { w: 66675, W: 33966 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 3, w: 5, W: 4 }, timeAcquiringMicros: { r: 3504, w: 28076, W: 50671 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 326ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.547-0400 m31100| 2015-07-09T14:15:48.543-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.547-0400 m31100| 2015-07-09T14:15:48.543-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_237 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.548-0400 m31100| 2015-07-09T14:15:48.543-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.548-0400 m31102| 2015-07-09T14:15:48.545-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mr.coll61_233 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.550-0400 m31101| 2015-07-09T14:15:48.550-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_234 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.553-0400 m31102| 2015-07-09T14:15:48.553-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mr.coll61_234 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.560-0400 m31200| 2015-07-09T14:15:48.560-0400 I COMMAND [conn85] CMD: drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.563-0400 m31102| 2015-07-09T14:15:48.562-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.563-0400 m31200| 2015-07-09T14:15:48.563-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.563-0400 m31200| 2015-07-09T14:15:48.563-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.564-0400 m31101| 2015-07-09T14:15:48.564-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.566-0400 m31101| 2015-07-09T14:15:48.566-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.570-0400 m31102| 2015-07-09T14:15:48.570-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.572-0400 m31201| 2015-07-09T14:15:48.572-0400 I COMMAND [repl writer worker 0] CMD: 
drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.573-0400 m31202| 2015-07-09T14:15:48.572-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465747_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.631-0400 m31201| 2015-07-09T14:15:48.630-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.631-0400 m31200| 2015-07-09T14:15:48.630-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_188 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.631-0400 m31202| 2015-07-09T14:15:48.630-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465747_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.632-0400 m31200| 2015-07-09T14:15:48.632-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_189 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.638-0400 m31100| 2015-07-09T14:15:48.637-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_239 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:48.638-0400 m31100| 2015-07-09T14:15:48.637-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_238 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.024-0400 m31200| 2015-07-09T14:15:49.024-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.029-0400 m31200| 2015-07-09T14:15:49.029-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.030-0400 m31200| 2015-07-09T14:15:49.030-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.034-0400 m31200| 2015-07-09T14:15:49.033-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_185 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.038-0400 m31200| 2015-07-09T14:15:49.037-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465748_65 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.038-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.038-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.038-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.039-0400 m31200| values...., out: "tmp.mrs.coll61_1436465748_65", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 421 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 33, R: 4, W: 4 }, timeAcquiringMicros: { w: 421318, R: 23979, W: 5954 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 579ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.043-0400 m31200| 2015-07-09T14:15:49.043-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.050-0400 m31200| 2015-07-09T14:15:49.050-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.050-0400 m31200| 2015-07-09T14:15:49.050-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_187 
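
A fresh round of first-pass commands begins here against new temporaries. The naming scheme is visible in the log: tmp.mr.coll61_<n> is the in-progress working collection, numbered by a per-node counter (m31100 is in the 220s-240s while m31200 is in the 180s), and tmp.mrs.coll61_<epoch>_<seq> is the finished per-shard result, the epoch 1436465748 matching the 14:15:48 -0400 entries. If a run is interrupted, such temporaries can be left behind; a sketch for spotting them:

    // List any leftover mapReduce temporaries in db61.
    db.getSiblingDB('db61')
      .getCollectionNames()
      .filter(function(name) { return name.indexOf('tmp.mr') === 0; });
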
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.052-0400 m31200| 2015-07-09T14:15:49.051-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_187 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.054-0400 m31200| 2015-07-09T14:15:49.053-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465748_66 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.054-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.054-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.054-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.055-0400 m31200| values...., out: "tmp.mrs.coll61_1436465748_66", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:212 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 11, w: 6033, W: 444 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 4, w: 13, R: 16, W: 9 }, timeAcquiringMicros: { r: 5194, w: 76994, R: 116258, W: 24404 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 512ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.061-0400 m31200| 2015-07-09T14:15:49.060-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.067-0400 m31200| 2015-07-09T14:15:49.067-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.068-0400 m31200| 2015-07-09T14:15:49.067-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.069-0400 m31200| 2015-07-09T14:15:49.068-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_186 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.071-0400 m31200| 2015-07-09T14:15:49.070-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465748_75 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.071-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.072-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.072-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.074-0400 m31200| values...., out: "tmp.mrs.coll61_1436465748_75", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:212 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 13243, W: 195 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 7, w: 21, R: 13, W: 8 }, timeAcquiringMicros: { r: 7561, w: 94991, R: 109652, W: 36028 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 540ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.081-0400 m31200| 2015-07-09T14:15:49.081-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.089-0400 
m31200| 2015-07-09T14:15:49.086-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_189 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.089-0400 m31200| 2015-07-09T14:15:49.087-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_189 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.089-0400 m31200| 2015-07-09T14:15:49.088-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_189 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.090-0400 m31200| 2015-07-09T14:15:49.089-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465748_77 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.090-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.090-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.090-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.091-0400 m31200| values...., out: "tmp.mrs.coll61_1436465748_77", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:212 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7347, w: 7077, W: 1197 } }, Database: { acquireCount: { r: 26, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 5, w: 9, R: 17, W: 9 }, timeAcquiringMicros: { r: 42125, w: 28460, R: 33677, W: 114378 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 498ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.092-0400 m31200| 2015-07-09T14:15:49.090-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.096-0400 m31200| 2015-07-09T14:15:49.095-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_188 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.098-0400 m31200| 2015-07-09T14:15:49.095-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_188 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.098-0400 m31200| 2015-07-09T14:15:49.096-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_188 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.098-0400 m31200| 2015-07-09T14:15:49.097-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465748_76 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.098-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.099-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.099-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.099-0400 m31200| values...., out: "tmp.mrs.coll61_1436465748_76", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:212 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 6011, w: 21342 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 8, w: 14, R: 16, W: 5 }, timeAcquiringMicros: { r: 70025, w: 44964, R: 19157, W: 71895 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } 
protocol:op_query 505ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.582-0400 m31100| 2015-07-09T14:15:49.581-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.587-0400 m31100| 2015-07-09T14:15:49.586-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_235 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.587-0400 m31100| 2015-07-09T14:15:49.586-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_235 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.589-0400 m31100| 2015-07-09T14:15:49.588-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_235 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.591-0400 m31100| 2015-07-09T14:15:49.591-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465748_65 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.592-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.592-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.592-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.593-0400 m31100| values...., out: "tmp.mrs.coll61_1436465748_65", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:212 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 68660, W: 65 } }, Database: { acquireCount: { r: 26, w: 66, R: 22, W: 11 }, acquireWaitCount: { r: 2, w: 23, R: 8, W: 7 }, timeAcquiringMicros: { r: 20847, w: 263372, R: 149842, W: 10926 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.593-0400 m31100| 2015-07-09T14:15:49.592-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.611-0400 m31100| 2015-07-09T14:15:49.611-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.622-0400 m31100| 2015-07-09T14:15:49.621-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_236 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.622-0400 m31100| 2015-07-09T14:15:49.622-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_236 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.625-0400 m31100| 2015-07-09T14:15:49.624-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_236 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.628-0400 m31100| 2015-07-09T14:15:49.626-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465748_75 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.628-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.628-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.629-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.629-0400 m31100| values...., out: "tmp.mrs.coll61_1436465748_75", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:19 reslen:212 locks:{ Global: { acquireCount: 
{ r: 189, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 6281, W: 261 } }, Database: { acquireCount: { r: 26, w: 66, R: 30, W: 11 }, acquireWaitCount: { r: 6, w: 13, R: 14, W: 9 }, timeAcquiringMicros: { r: 22085, w: 107964, R: 127536, W: 19760 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1095ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.629-0400 m31100| 2015-07-09T14:15:49.628-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.657-0400 m31100| 2015-07-09T14:15:49.656-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.670-0400 m31100| 2015-07-09T14:15:49.670-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_237 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.672-0400 m31100| 2015-07-09T14:15:49.670-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_237 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.674-0400 m31100| 2015-07-09T14:15:49.673-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_237 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.677-0400 m31100| 2015-07-09T14:15:49.676-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465748_66 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.677-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.677-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.677-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.678-0400 m31100| values...., out: "tmp.mrs.coll61_1436465748_66", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:28 reslen:212 locks:{ Global: { acquireCount: { r: 207, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 11658, w: 5992, W: 795 } }, Database: { acquireCount: { r: 26, w: 66, R: 39, W: 11 }, acquireWaitCount: { r: 8, w: 13, R: 18, W: 9 }, timeAcquiringMicros: { r: 34102, w: 58155, R: 105342, W: 57428 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.678-0400 m31100| 2015-07-09T14:15:49.678-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.705-0400 m31100| 2015-07-09T14:15:49.705-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.718-0400 m31100| 2015-07-09T14:15:49.717-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_239 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.718-0400 m31100| 2015-07-09T14:15:49.718-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_239 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.720-0400 m31100| 2015-07-09T14:15:49.720-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_239 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.724-0400 m31100| 2015-07-09T14:15:49.722-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465748_77 command: mapReduce { mapreduce: "coll61", map: function mapper() { 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.724-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.724-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.724-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.725-0400 m31100| values...., out: "tmp.mrs.coll61_1436465748_77", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:24 reslen:212 locks:{ Global: { acquireCount: { r: 199, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 14340, w: 17502, W: 761 } }, Database: { acquireCount: { r: 26, w: 66, R: 35, W: 11 }, acquireWaitCount: { r: 10, w: 15, R: 12, W: 9 }, timeAcquiringMicros: { r: 80931, w: 122250, R: 37769, W: 58656 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.725-0400 m31100| 2015-07-09T14:15:49.723-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.725-0400 m31100| 2015-07-09T14:15:49.724-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.737-0400 m31100| 2015-07-09T14:15:49.736-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_238 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.738-0400 m31100| 2015-07-09T14:15:49.737-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_238 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.738-0400 m31100| 2015-07-09T14:15:49.738-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_238 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.745-0400 m31100| 2015-07-09T14:15:49.744-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465748_76 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.745-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.745-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.745-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.747-0400 m31100| values...., out: "tmp.mrs.coll61_1436465748_76", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:26 reslen:212 locks:{ Global: { acquireCount: { r: 203, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 28279, w: 11515, W: 466 } }, Database: { acquireCount: { r: 26, w: 66, R: 37, W: 11 }, acquireWaitCount: { r: 6, w: 18, R: 14, W: 7 }, timeAcquiringMicros: { r: 19915, w: 156926, R: 46215, W: 65593 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1153ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.747-0400 m31100| 2015-07-09T14:15:49.747-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.857-0400 m31100| 2015-07-09T14:15:49.857-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.867-0400 m31100| 2015-07-09T14:15:49.867-0400 I COMMAND 
[conn191] CMD: drop db61.tmp.mr.coll61_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.871-0400 m31100| 2015-07-09T14:15:49.870-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.871-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.871-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.871-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.871-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.871-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.872-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465748_65", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465748_65", timeMillis: 1128, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|32, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465748_65", timeMillis: 572, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|24, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 6, W: 20 }, timeAcquiringMicros: { r: 1494, w: 60943, W: 81378 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 3, w: 10, W: 3 }, timeAcquiringMicros: { r: 4033, w: 42804, W: 3668 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 278ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.872-0400 m31100| 2015-07-09T14:15:49.871-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.875-0400 m31101| 2015-07-09T14:15:49.875-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mr.coll61_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.878-0400 m31102| 2015-07-09T14:15:49.877-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mr.coll61_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.880-0400 m31200| 2015-07-09T14:15:49.880-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.884-0400 m31202| 2015-07-09T14:15:49.883-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.884-0400 m31101| 2015-07-09T14:15:49.884-0400 I COMMAND [repl writer worker 11] 
CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.885-0400 m31201| 2015-07-09T14:15:49.884-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.886-0400 m31100| 2015-07-09T14:15:49.885-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.887-0400 m31102| 2015-07-09T14:15:49.886-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465748_65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.888-0400 m31200| 2015-07-09T14:15:49.888-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_190 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.889-0400 m31100| 2015-07-09T14:15:49.889-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.891-0400 m31100| 2015-07-09T14:15:49.890-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.891-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.891-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.891-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.891-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.891-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.893-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465748_75", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465748_75", timeMillis: 1091, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|53, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465748_75", timeMillis: 537, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|75, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 7, W: 20 }, timeAcquiringMicros: { r: 8828, w: 93542, W: 32274 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 8, W: 4 }, timeAcquiringMicros: { w: 25768, W: 4075 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 262ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.894-0400 m31102| 2015-07-09T14:15:49.891-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_241 
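The mapReduce entries above repeatedly truncate the workload's JavaScript: only fragments such as `if (this.hasOwnProperty('key') && this.has...`, `var res = {};`, `values....`, and `return reducedValue;` survive in the command echo. Below is a minimal sketch of what such a mapper/reducer/finalizer plausibly looks like; it is an illustrative reconstruction consistent with the visible fragments, not the verbatim source of the concurrency workload, and the emitted field names are assumptions.

function mapper() {
    // Assumption: only documents carrying both fields are emitted, matching
    // the visible fragment "this.hasOwnProperty('key') && this.has...".
    if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
        emit(this.key, {count: 1, sum: this.value});
    }
}

function reducer(key, values) {
    var res = {};  // matches the "var res = {};" fragment in the log
    // Hypothetical merge: fold every emitted field into the accumulator.
    values.forEach(function(v) {
        Object.keys(v).forEach(function(field) {
            res[field] = (res[field] || 0) + v[field];
        });
    });
    return res;
}

function finalizer(key, reducedValue) {
    // The shardedfinish entries show the finalizer passing the value through.
    return reducedValue;
}

The per-shard counts in these entries (input: 983 and 1017, output: 20 each) show each shard folding its ~1000 emits into 20 output keys before the shardedfinish pass merges the two per-shard temporaries.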
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.894-0400 m31100| 2015-07-09T14:15:49.891-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.894-0400 m31101| 2015-07-09T14:15:49.892-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mr.coll61_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.894-0400 m31100| 2015-07-09T14:15:49.894-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.897-0400 m31200| 2015-07-09T14:15:49.897-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.899-0400 m31102| 2015-07-09T14:15:49.898-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.900-0400 m31101| 2015-07-09T14:15:49.899-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.904-0400 m31201| 2015-07-09T14:15:49.904-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.904-0400 m31202| 2015-07-09T14:15:49.904-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465748_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.929-0400 m31200| 2015-07-09T14:15:49.929-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_191 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:49.959-0400 m31100| 2015-07-09T14:15:49.959-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.074-0400 m31200| 2015-07-09T14:15:50.074-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.077-0400 m31200| 2015-07-09T14:15:50.077-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_190 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.078-0400 m31200| 2015-07-09T14:15:50.078-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_190 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.079-0400 m31200| 2015-07-09T14:15:50.078-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_190 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.079-0400 m31200| 2015-07-09T14:15:50.079-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465749_67 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.079-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.079-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.079-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.080-0400 m31200| values...., out: "tmp.mrs.coll61_1436465749_67", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 6, R: 4 }, timeAcquiringMicros: { w: 21215, R: 19531 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 191ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.090-0400 
m31100| 2015-07-09T14:15:50.090-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.090-0400 m31100| 2015-07-09T14:15:50.090-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.097-0400 m31200| 2015-07-09T14:15:50.097-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465749_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.104-0400 m31200| 2015-07-09T14:15:50.103-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_191 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.104-0400 m31200| 2015-07-09T14:15:50.103-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_191 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.105-0400 m31200| 2015-07-09T14:15:50.105-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_191 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.109-0400 m31200| 2015-07-09T14:15:50.109-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465749_78 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.110-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.110-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.110-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.110-0400 m31200| values...., out: "tmp.mrs.coll61_1436465749_78", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 2, R: 5, W: 2 }, timeAcquiringMicros: { w: 11652, R: 3628, W: 15223 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.112-0400 m31100| 2015-07-09T14:15:50.112-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.113-0400 m31100| 2015-07-09T14:15:50.113-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.119-0400 m31100| 2015-07-09T14:15:50.118-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.120-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.120-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.120-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.121-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.121-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.122-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465748_66", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465748_66", timeMillis: 1131, counts: { input: 
983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|101, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465748_66", timeMillis: 509, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|47, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 14, W: 20 }, timeAcquiringMicros: { r: 1271, w: 137854, W: 130979 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 9, W: 4 }, timeAcquiringMicros: { w: 35916, W: 17898 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 440ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.122-0400 m31100| 2015-07-09T14:15:50.119-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.123-0400 m31102| 2015-07-09T14:15:50.120-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mr.coll61_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.123-0400 m31101| 2015-07-09T14:15:50.123-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mr.coll61_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.127-0400 m31101| 2015-07-09T14:15:50.127-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mr.coll61_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.128-0400 m31102| 2015-07-09T14:15:50.127-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mr.coll61_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.130-0400 m31100| 2015-07-09T14:15:50.129-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.130-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.130-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.130-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.130-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.130-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.131-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465748_77", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465748_77", timeMillis: 1127, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|125, electionId: ObjectId('559eb5880000000000000000') } }, 
test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465748_77", timeMillis: 495, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|94, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 17, W: 20 }, timeAcquiringMicros: { r: 17965, w: 113781, W: 49801 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 12, W: 3 }, timeAcquiringMicros: { r: 12571, w: 65039, W: 34189 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 406ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.131-0400 m31100| 2015-07-09T14:15:50.130-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.132-0400 m31200| 2015-07-09T14:15:50.131-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.132-0400 m31102| 2015-07-09T14:15:50.132-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.134-0400 m31201| 2015-07-09T14:15:50.134-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.135-0400 m31202| 2015-07-09T14:15:50.134-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.138-0400 m31101| 2015-07-09T14:15:50.137-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465748_66 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.139-0400 m31200| 2015-07-09T14:15:50.139-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.140-0400 m31200| 2015-07-09T14:15:50.140-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_192 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.142-0400 m31202| 2015-07-09T14:15:50.142-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.143-0400 m31201| 2015-07-09T14:15:50.142-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.154-0400 m31102| 2015-07-09T14:15:50.153-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.154-0400 m31101| 2015-07-09T14:15:50.154-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465748_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.165-0400 m31100| 2015-07-09T14:15:50.164-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.165-0400 m31200| 2015-07-09T14:15:50.164-0400 I COMMAND [conn29] CMD: 
drop db61.tmp.mr.coll61_193 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.181-0400 m31100| 2015-07-09T14:15:50.181-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.322-0400 m31200| 2015-07-09T14:15:50.321-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465750_68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.327-0400 m31200| 2015-07-09T14:15:50.327-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_192 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.327-0400 m31200| 2015-07-09T14:15:50.327-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_192 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.328-0400 m31200| 2015-07-09T14:15:50.328-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_192 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.328-0400 m31200| 2015-07-09T14:15:50.328-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465750_68 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.329-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.329-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.329-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.329-0400 m31200| values...., out: "tmp.mrs.coll61_1436465750_68", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 4 }, timeAcquiringMicros: { r: 203, w: 8741, R: 24791 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 188ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.342-0400 m31200| 2015-07-09T14:15:50.342-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465750_79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.347-0400 m31200| 2015-07-09T14:15:50.347-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_193 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.348-0400 m31200| 2015-07-09T14:15:50.347-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_193 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.348-0400 m31200| 2015-07-09T14:15:50.348-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_193 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.349-0400 m31200| 2015-07-09T14:15:50.348-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465750_79 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.349-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.349-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.349-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.350-0400 m31200| values...., out: "tmp.mrs.coll61_1436465750_79", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 6188 } }, Database: { 
acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 2, R: 2, W: 2 }, timeAcquiringMicros: { r: 9080, w: 10637, R: 1772, W: 11250 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 193ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.429-0400 m31100| 2015-07-09T14:15:50.429-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.433-0400 m31100| 2015-07-09T14:15:50.432-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.434-0400 m31100| 2015-07-09T14:15:50.434-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.434-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.435-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.435-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.435-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.435-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.437-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465748_76", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465748_76", timeMillis: 1146, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|141, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465748_76", timeMillis: 504, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465749000|98, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 20, W: 20 }, timeAcquiringMicros: { r: 71140, w: 121096, W: 286962 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 11, W: 3 }, timeAcquiringMicros: { w: 77546, W: 2834 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 687ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.437-0400 m31100| 2015-07-09T14:15:50.434-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.437-0400 m31102| 2015-07-09T14:15:50.434-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.437-0400 m31101| 
2015-07-09T14:15:50.435-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mr.coll61_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.442-0400 m31200| 2015-07-09T14:15:50.442-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.443-0400 m31102| 2015-07-09T14:15:50.443-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.445-0400 m31101| 2015-07-09T14:15:50.445-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.447-0400 m31201| 2015-07-09T14:15:50.446-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.448-0400 m31202| 2015-07-09T14:15:50.447-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465748_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.449-0400 m31200| 2015-07-09T14:15:50.449-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.450-0400 m31100| 2015-07-09T14:15:50.450-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_249 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.501-0400 m31100| 2015-07-09T14:15:50.500-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.511-0400 m31100| 2015-07-09T14:15:50.510-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.511-0400 m31100| 2015-07-09T14:15:50.510-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.520-0400 m31100| 2015-07-09T14:15:50.520-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.523-0400 m31100| 2015-07-09T14:15:50.522-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465749_67 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.523-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.523-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.523-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.524-0400 m31100| values...., out: "tmp.mrs.coll61_1436465749_67", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 18, w: 28, W: 1 }, timeAcquiringMicros: { r: 107735, w: 109409, W: 11734 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 8, w: 20, R: 8, W: 7 }, timeAcquiringMicros: { r: 46108, w: 104748, R: 6140, W: 18690 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 635ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.524-0400 m31100| 2015-07-09T14:15:50.524-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.558-0400 m31100| 2015-07-09T14:15:50.557-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465749_78 
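Every cycle in this stretch of the log has the same two-phase shape: each shard runs the mapReduce with shardedFirstPass: true into a tmp.mrs.coll61_* collection, then one shard executes mapreduce.shardedfinish to fold those temporaries into db61.map_reduce_reduce_nonatomic, after which the tmp.mr.* and tmp.mrs.* collections are dropped on the primaries and, via replication, by the repl writer workers on the secondaries (m31101/m31102, m31201/m31202). A hedged sketch of the client-side call that produces this pattern, assuming it is issued through mongos against db61 as the inputDB field suggests, is:

// Illustrative only: the exact invocation lives in the FSM workload, not in this log.
var res = db.runCommand({
    mapReduce: 'coll61',
    map: mapper,
    reduce: reducer,
    finalize: finalizer,
    // Matches the out: {...} document echoed in the shardedfinish entries.
    out: {reduce: 'map_reduce_reduce_nonatomic', nonAtomic: true}
});
assert.commandWorked(res);

The locks: breakdown on each slow-command line records per-resource acquisition counts and wait times in microseconds (r/w are intent-shared/intent-exclusive modes, R/W shared/exclusive), so, for example, the 687 ms shardedfinish above spent roughly 287 ms of that waiting on the global exclusive lock (W: 286962).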
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.564-0400 m31200| 2015-07-09T14:15:50.564-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465750_80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.565-0400 m31100| 2015-07-09T14:15:50.565-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.565-0400 m31100| 2015-07-09T14:15:50.565-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.569-0400 m31100| 2015-07-09T14:15:50.569-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.575-0400 m31200| 2015-07-09T14:15:50.575-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.575-0400 m31200| 2015-07-09T14:15:50.575-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.577-0400 m31200| 2015-07-09T14:15:50.576-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.578-0400 m31200| 2015-07-09T14:15:50.577-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465750_80 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.578-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.578-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.578-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.578-0400 m31200| values...., out: "tmp.mrs.coll61_1436465750_80", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.585-0400 m31100| 2015-07-09T14:15:50.585-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465749_78 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.586-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.586-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.587-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.588-0400 m31100| values...., out: "tmp.mrs.coll61_1436465749_78", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 16, w: 29, W: 1 }, timeAcquiringMicros: { r: 141300, w: 104223, W: 5088 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 10, w: 16, R: 6, W: 8 }, timeAcquiringMicros: { r: 85119, w: 68764, R: 25728, W: 35848 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 657ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.589-0400 m31100| 
2015-07-09T14:15:50.586-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.614-0400 m31100| 2015-07-09T14:15:50.614-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465750_68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.626-0400 m31100| 2015-07-09T14:15:50.626-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.627-0400 m31100| 2015-07-09T14:15:50.626-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.632-0400 m31100| 2015-07-09T14:15:50.631-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.639-0400 m31100| 2015-07-09T14:15:50.638-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465750_68 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.640-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.640-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.640-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.641-0400 m31100| values...., out: "tmp.mrs.coll61_1436465750_68", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 16, w: 14, W: 1 }, timeAcquiringMicros: { r: 75707, w: 51093, W: 4006 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 16, w: 20, R: 5, W: 5 }, timeAcquiringMicros: { r: 71235, w: 95270, R: 6160, W: 13545 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 498ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.642-0400 m31100| 2015-07-09T14:15:50.639-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.642-0400 m31100| 2015-07-09T14:15:50.639-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465750_79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.649-0400 m31100| 2015-07-09T14:15:50.648-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.649-0400 m31100| 2015-07-09T14:15:50.649-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.651-0400 m31100| 2015-07-09T14:15:50.651-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.656-0400 m31100| 2015-07-09T14:15:50.655-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465750_79 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.656-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.656-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.656-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.657-0400 m31100| values...., out: "tmp.mrs.coll61_1436465750_79", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 
writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 17, w: 12, W: 1 }, timeAcquiringMicros: { r: 58864, w: 54348, W: 921 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 16, w: 23, R: 7, W: 7 }, timeAcquiringMicros: { r: 74022, w: 91022, R: 13537, W: 7217 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 500ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.658-0400 m31100| 2015-07-09T14:15:50.656-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.754-0400 m31100| 2015-07-09T14:15:50.752-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.761-0400 m31100| 2015-07-09T14:15:50.761-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.762-0400 m31100| 2015-07-09T14:15:50.762-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.762-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.762-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.763-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.763-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.763-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.764-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465749_67", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465749_67", timeMillis: 623, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|115, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465749_67", timeMillis: 190, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|21, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 7, W: 20 }, timeAcquiringMicros: { r: 1689, w: 41353, W: 27564 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 13, W: 3 }, timeAcquiringMicros: { w: 79094, W: 6602 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 238ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.764-0400 
m31100| 2015-07-09T14:15:50.762-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.766-0400 m31102| 2015-07-09T14:15:50.766-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mr.coll61_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.766-0400 m31101| 2015-07-09T14:15:50.766-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mr.coll61_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.768-0400 m31200| 2015-07-09T14:15:50.767-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.771-0400 m31202| 2015-07-09T14:15:50.770-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.772-0400 m31201| 2015-07-09T14:15:50.771-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.773-0400 m31102| 2015-07-09T14:15:50.773-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.773-0400 m31200| 2015-07-09T14:15:50.773-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_195 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.775-0400 m31101| 2015-07-09T14:15:50.775-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mrs.coll61_1436465749_67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.780-0400 m31100| 2015-07-09T14:15:50.779-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_254 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.804-0400 m31100| 2015-07-09T14:15:50.803-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.832-0400 m31100| 2015-07-09T14:15:50.832-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.840-0400 m31100| 2015-07-09T14:15:50.840-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.841-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.841-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.841-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.841-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.841-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.843-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465749_78", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465749_78", timeMillis: 638, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|138, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465749_78", timeMillis: 176, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 
1436465750000|42, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 12, W: 20 }, timeAcquiringMicros: { r: 5085, w: 47092, W: 44805 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 8, W: 3 }, timeAcquiringMicros: { w: 39633, W: 26140 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 253ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.843-0400 m31100| 2015-07-09T14:15:50.840-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465749_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.843-0400 m31101| 2015-07-09T14:15:50.841-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.845-0400 m31102| 2015-07-09T14:15:50.845-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.846-0400 m31200| 2015-07-09T14:15:50.846-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465749_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.848-0400 m31102| 2015-07-09T14:15:50.847-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465749_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.851-0400 m31202| 2015-07-09T14:15:50.850-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465749_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.851-0400 m31201| 2015-07-09T14:15:50.850-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465749_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.893-0400 m31200| 2015-07-09T14:15:50.892-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465750_69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.902-0400 m31200| 2015-07-09T14:15:50.902-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_195 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.902-0400 m31200| 2015-07-09T14:15:50.902-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_195 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.904-0400 m31200| 2015-07-09T14:15:50.903-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_195 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.905-0400 m31200| 2015-07-09T14:15:50.903-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465750_69 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.905-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.905-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.905-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.906-0400 m31200| values...., out: "tmp.mrs.coll61_1436465750_69", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 
numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.919-0400 m31100| 2015-07-09T14:15:50.919-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.921-0400 m31100| 2015-07-09T14:15:50.919-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.924-0400 m31101| 2015-07-09T14:15:50.923-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465749_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.927-0400 m31100| 2015-07-09T14:15:50.927-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.929-0400 m31100| 2015-07-09T14:15:50.929-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.929-0400 m31102| 2015-07-09T14:15:50.929-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mr.coll61_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.931-0400 m31200| 2015-07-09T14:15:50.931-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_196 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.932-0400 m31102| 2015-07-09T14:15:50.932-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mr.coll61_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.933-0400 m31100| 2015-07-09T14:15:50.933-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.934-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.934-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.935-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.935-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.936-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.937-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465750_68", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465750_68", timeMillis: 486, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|161, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465750_68", timeMillis: 187, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|67, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, 
reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 2, w: 18, W: 20 }, timeAcquiringMicros: { r: 18421, w: 71887, W: 53551 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 3, w: 5, W: 4 }, timeAcquiringMicros: { r: 28634, w: 18808, W: 10868 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 293ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.938-0400 m31100| 2015-07-09T14:15:50.934-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_255 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.938-0400 m31101| 2015-07-09T14:15:50.934-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mr.coll61_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.938-0400 m31100| 2015-07-09T14:15:50.934-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.938-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.938-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.938-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.939-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.939-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.940-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465750_79", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465750_79", timeMillis: 494, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|177, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465750_79", timeMillis: 192, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|88, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 2, w: 19, W: 20 }, timeAcquiringMicros: { r: 14063, w: 70240, W: 37088 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 6, W: 4 }, timeAcquiringMicros: { r: 11668, w: 31318, W: 29248 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 277ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.941-0400 m31100| 2015-07-09T14:15:50.934-0400 I COMMAND [conn38] CMD: drop 
db61.tmp.mrs.coll61_1436465750_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.941-0400 m31100| 2015-07-09T14:15:50.936-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465750_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.941-0400 m31101| 2015-07-09T14:15:50.939-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mr.coll61_253
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.942-0400 m31200| 2015-07-09T14:15:50.940-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465750_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.943-0400 m31102| 2015-07-09T14:15:50.943-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465750_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.945-0400 m31101| 2015-07-09T14:15:50.944-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465750_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.945-0400 m31202| 2015-07-09T14:15:50.945-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465750_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.946-0400 m31201| 2015-07-09T14:15:50.946-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465750_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.949-0400 m31200| 2015-07-09T14:15:50.948-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465750_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.955-0400 m31100| 2015-07-09T14:15:50.955-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_256
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.956-0400 m31102| 2015-07-09T14:15:50.955-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465750_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.956-0400 m31101| 2015-07-09T14:15:50.956-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465750_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.960-0400 m31200| 2015-07-09T14:15:50.959-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.974-0400 m31202| 2015-07-09T14:15:50.973-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mrs.coll61_1436465750_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.974-0400 m31201| 2015-07-09T14:15:50.974-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465750_68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.988-0400 m31100| 2015-07-09T14:15:50.987-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_257
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:50.996-0400 m31200| 2015-07-09T14:15:50.996-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.130-0400 m31100| 2015-07-09T14:15:51.129-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465750_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.138-0400 m31100| 2015-07-09T14:15:51.137-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_249
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.139-0400 m31100| 2015-07-09T14:15:51.137-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_249
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.164-0400 m31100| 2015-07-09T14:15:51.163-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_249
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.181-0400 m31100| 2015-07-09T14:15:51.179-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465750_80 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.181-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.181-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.181-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.182-0400 m31100| values...., out: "tmp.mrs.coll61_1436465750_80", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 13, w: 18, W: 1 }, timeAcquiringMicros: { r: 66842, w: 143707, W: 46172 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 11, w: 22, R: 9, W: 9 }, timeAcquiringMicros: { r: 108238, w: 124541, R: 25488, W: 47141 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 730ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.182-0400 m31100| 2015-07-09T14:15:51.180-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.207-0400 m31200| 2015-07-09T14:15:51.207-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.215-0400 m31200| 2015-07-09T14:15:51.211-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_196
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.215-0400 m31200| 2015-07-09T14:15:51.211-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_196
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.219-0400 m31200| 2015-07-09T14:15:51.219-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_196
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.227-0400 m31200| 2015-07-09T14:15:51.226-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465750_81 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.227-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.228-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.228-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.229-0400 m31200| values...., out: "tmp.mrs.coll61_1436465750_81", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 37 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 9, R: 8, W: 3 }, timeAcquiringMicros: { r: 1197, w: 33404, R: 38362, W: 8139 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 295ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.230-0400 m31200| 2015-07-09T14:15:51.229-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.238-0400 m31200| 2015-07-09T14:15:51.237-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.238-0400 m31200| 2015-07-09T14:15:51.238-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.239-0400 m31200| 2015-07-09T14:15:51.239-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_197
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.240-0400 m31200| 2015-07-09T14:15:51.240-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465750_82 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.241-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.241-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.241-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.243-0400 m31200| values...., out: "tmp.mrs.coll61_1436465750_82", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 4274 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 7, R: 7, W: 4 }, timeAcquiringMicros: { r: 16374, w: 33876, R: 37082, W: 15078 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 289ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.256-0400 m31200| 2015-07-09T14:15:51.255-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.267-0400 m31200| 2015-07-09T14:15:51.266-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.267-0400 m31200| 2015-07-09T14:15:51.267-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.270-0400 m31200| 2015-07-09T14:15:51.270-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_198
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.271-0400 m31200| 2015-07-09T14:15:51.270-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465750_70 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.271-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.272-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.272-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.273-0400 m31200| values...., out: "tmp.mrs.coll61_1436465750_70", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 1, w: 3, R: 7, W: 4 }, timeAcquiringMicros: { r: 8327, w: 27076, R: 17153, W: 32200 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 291ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.296-0400 m31100| 2015-07-09T14:15:51.296-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465750_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.305-0400 m31100| 2015-07-09T14:15:51.305-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_254
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.305-0400 m31100| 2015-07-09T14:15:51.305-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_254
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.313-0400 m31100| 2015-07-09T14:15:51.313-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_254
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.315-0400 m31100| 2015-07-09T14:15:51.315-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465750_69 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.316-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.316-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.316-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.317-0400 m31100| values...., out: "tmp.mrs.coll61_1436465750_69", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 6, W: 1 }, timeAcquiringMicros: { r: 71452, w: 26649, W: 742 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 17, R: 9, W: 6 }, timeAcquiringMicros: { r: 564, w: 212316, R: 61276, W: 8504 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 542ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.318-0400 m31100| 2015-07-09T14:15:51.316-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.333-0400 m31100| 2015-07-09T14:15:51.333-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.337-0400 m31100| 2015-07-09T14:15:51.337-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_255
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.338-0400 m31100| 2015-07-09T14:15:51.337-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_255
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.340-0400 m31100| 2015-07-09T14:15:51.339-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_255
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.344-0400 m31100| 2015-07-09T14:15:51.343-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465750_81 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.345-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.345-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.345-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.346-0400 m31100| values...., out: "tmp.mrs.coll61_1436465750_81", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 19253, w: 2831, W: 318 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 13, R: 11, W: 9 }, timeAcquiringMicros: { r: 21699, w: 99311, R: 62217, W: 27058 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 413ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.347-0400 m31100| 2015-07-09T14:15:51.347-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.402-0400 m31100| 2015-07-09T14:15:51.401-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.406-0400 m31100| 2015-07-09T14:15:51.406-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_256
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.407-0400 m31100| 2015-07-09T14:15:51.406-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_256
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.411-0400 m31100| 2015-07-09T14:15:51.411-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_256
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.415-0400 m31100| 2015-07-09T14:15:51.415-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465750_82 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.415-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.415-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.415-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.416-0400 m31100| values...., out: "tmp.mrs.coll61_1436465750_82", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 12, w: 13, W: 1 }, timeAcquiringMicros: { r: 61534, w: 26864, W: 659 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 13, R: 9, W: 7 }, timeAcquiringMicros: { r: 24526, w: 95382, R: 20487, W: 73186 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 464ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.416-0400 m31100| 2015-07-09T14:15:51.416-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.444-0400 m31100| 2015-07-09T14:15:51.444-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.448-0400 m31100| 2015-07-09T14:15:51.448-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.454-0400 m31100| 2015-07-09T14:15:51.453-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.454-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.454-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.454-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.454-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.455-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.457-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465750_80", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465750_80", timeMillis: 688, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|8, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465750_80", timeMillis: 126, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|111, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 4, W: 20 }, timeAcquiringMicros: { r: 1371, w: 19406, W: 70374 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 2, w: 9, W: 3 }, timeAcquiringMicros: { r: 1328, w: 60331, W: 28654 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 273ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.457-0400 m31100| 2015-07-09T14:15:51.454-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465750_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.458-0400 m31200| 2015-07-09T14:15:51.458-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465750_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.462-0400 m31101| 2015-07-09T14:15:51.462-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.464-0400 m31202| 2015-07-09T14:15:51.464-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465750_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.464-0400 m31201| 2015-07-09T14:15:51.464-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mrs.coll61_1436465750_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.466-0400 m31200| 2015-07-09T14:15:51.466-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.467-0400 m31101| 2015-07-09T14:15:51.466-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465750_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.467-0400 m31102| 2015-07-09T14:15:51.467-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mr.coll61_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.470-0400 m31100| 2015-07-09T14:15:51.468-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.474-0400 m31102| 2015-07-09T14:15:51.472-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465750_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.512-0400 m31100| 2015-07-09T14:15:51.512-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.520-0400 m31100| 2015-07-09T14:15:51.519-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.520-0400 m31101| 2015-07-09T14:15:51.520-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mr.coll61_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.523-0400 m31102| 2015-07-09T14:15:51.522-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.530-0400 m31100| 2015-07-09T14:15:51.529-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.530-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.530-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.530-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.530-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.530-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.532-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465750_69", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465750_69", timeMillis: 532, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|57, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465750_69", timeMillis: 129, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465750000|135, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 4, w: 21, W: 20 }, timeAcquiringMicros: { r: 6881, w: 57944, W: 50333 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 2, W: 3 }, timeAcquiringMicros: { w: 1290, W: 11589 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 213ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.532-0400 m31100| 2015-07-09T14:15:51.530-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465750_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.532-0400 m31200| 2015-07-09T14:15:51.532-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465750_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.535-0400 m31101| 2015-07-09T14:15:51.535-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465750_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.535-0400 m31102| 2015-07-09T14:15:51.535-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465750_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.542-0400 m31202| 2015-07-09T14:15:51.542-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465750_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.543-0400 m31201| 2015-07-09T14:15:51.542-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465750_69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.551-0400 m31200| 2015-07-09T14:15:51.550-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.559-0400 m31100| 2015-07-09T14:15:51.558-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.623-0400 m31200| 2015-07-09T14:15:51.623-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.626-0400 m31200| 2015-07-09T14:15:51.626-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.627-0400 m31200| 2015-07-09T14:15:51.626-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.638-0400 m31200| 2015-07-09T14:15:51.638-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_199
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.643-0400 m31200| 2015-07-09T14:15:51.642-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465751_83 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.643-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.643-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.643-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.644-0400 m31200| values...., out: "tmp.mrs.coll61_1436465751_83", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 3, R: 1, W: 3 }, timeAcquiringMicros: { w: 11226, R: 1007, W: 14626 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 176ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.664-0400 m31100| 2015-07-09T14:15:51.663-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.669-0400 m31100| 2015-07-09T14:15:51.668-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.669-0400 m31200| 2015-07-09T14:15:51.669-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.673-0400 m31100| 2015-07-09T14:15:51.673-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_257
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.673-0400 m31100| 2015-07-09T14:15:51.673-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_257
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.675-0400 m31200| 2015-07-09T14:15:51.675-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.676-0400 m31200| 2015-07-09T14:15:51.675-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.681-0400 m31200| 2015-07-09T14:15:51.680-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.682-0400 m31200| 2015-07-09T14:15:51.680-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465751_71 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.682-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.682-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.682-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.683-0400 m31200| values...., out: "tmp.mrs.coll61_1436465751_71", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1237 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { R: 4 }, timeAcquiringMicros: { R: 1448 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 130ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.689-0400 m31100| 2015-07-09T14:15:51.688-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.690-0400 m31101| 2015-07-09T14:15:51.689-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mr.coll61_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.690-0400 m31102| 2015-07-09T14:15:51.690-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.697-0400 m31100| 2015-07-09T14:15:51.697-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.698-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.698-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.698-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.698-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.698-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.699-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465750_81", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465750_81", timeMillis: 407, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|82, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465750_81", timeMillis: 280, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|27, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 4, w: 25, W: 20 }, timeAcquiringMicros: { r: 6650, w: 74045, W: 157498 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 2536 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 350ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.699-0400 m31100| 2015-07-09T14:15:51.698-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.704-0400 m31100| 2015-07-09T14:15:51.703-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_257
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.706-0400 m31200| 2015-07-09T14:15:51.705-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.708-0400 m31100| 2015-07-09T14:15:51.707-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465750_70 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.709-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.709-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.709-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.709-0400 m31100| values...., out: "tmp.mrs.coll61_1436465750_70", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 24, w: 37, W: 1 }, timeAcquiringMicros: { r: 137833, w: 133816, W: 5355 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 5, w: 13, R: 13, W: 8 }, timeAcquiringMicros: { r: 12047, w: 111116, R: 53590, W: 84459 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 728ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.710-0400 m31102| 2015-07-09T14:15:51.708-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.710-0400 m31100| 2015-07-09T14:15:51.708-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.710-0400 m31101| 2015-07-09T14:15:51.708-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.714-0400 m31202| 2015-07-09T14:15:51.713-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.714-0400 m31201| 2015-07-09T14:15:51.713-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465750_81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.719-0400 m31200| 2015-07-09T14:15:51.719-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.753-0400 m31100| 2015-07-09T14:15:51.753-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.867-0400 m31100| 2015-07-09T14:15:51.867-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.871-0400 m31100| 2015-07-09T14:15:51.871-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.874-0400 m31100| 2015-07-09T14:15:51.872-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.874-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.874-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.874-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.874-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.874-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.876-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465750_82", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465750_82", timeMillis: 456, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|132, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465750_82", timeMillis: 287, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|44, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 8247, w: 144333, W: 172386 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 9, W: 4 }, timeAcquiringMicros: { w: 33966, W: 4673 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 456ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.877-0400 m31100| 2015-07-09T14:15:51.872-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.877-0400 m31200| 2015-07-09T14:15:51.874-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.877-0400 m31102| 2015-07-09T14:15:51.875-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mr.coll61_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.879-0400 m31101| 2015-07-09T14:15:51.879-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mr.coll61_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.880-0400 m31201| 2015-07-09T14:15:51.879-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.881-0400 m31202| 2015-07-09T14:15:51.880-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.882-0400 m31102| 2015-07-09T14:15:51.881-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.884-0400 m31200| 2015-07-09T14:15:51.883-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.884-0400 m31101| 2015-07-09T14:15:51.884-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465750_82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.885-0400 m31100| 2015-07-09T14:15:51.885-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_266
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.951-0400 m31200| 2015-07-09T14:15:51.950-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465751_84
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.956-0400 m31200| 2015-07-09T14:15:51.956-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.956-0400 m31200| 2015-07-09T14:15:51.956-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.957-0400 m31200| 2015-07-09T14:15:51.956-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.963-0400 m31200| 2015-07-09T14:15:51.963-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465751_84 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.964-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.964-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.964-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:51.965-0400 m31200| values...., out: "tmp.mrs.coll61_1436465751_84", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 6369 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 47363, W: 204 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 243ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.020-0400 m31200| 2015-07-09T14:15:52.020-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465751_85
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.022-0400 m31100| 2015-07-09T14:15:52.021-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.024-0400 m31200| 2015-07-09T14:15:52.023-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.025-0400 m31200| 2015-07-09T14:15:52.024-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.026-0400 m31200| 2015-07-09T14:15:52.025-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.026-0400 m31200| 2015-07-09T14:15:52.026-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465751_85 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.026-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.027-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.027-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.028-0400 m31200| values...., out: "tmp.mrs.coll61_1436465751_85", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 5671 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { R: 6, W: 5 }, timeAcquiringMicros: { R: 4252, W: 7405 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 144ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.028-0400 m31100| 2015-07-09T14:15:52.026-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.029-0400 m31100| 2015-07-09T14:15:52.026-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.034-0400 m31100| 2015-07-09T14:15:52.034-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.043-0400 m31100| 2015-07-09T14:15:52.042-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465751_83 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.043-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.043-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.043-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.044-0400 m31100| values...., out: "tmp.mrs.coll61_1436465751_83", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 29, w: 41, W: 1 }, timeAcquiringMicros: { r: 89053, w: 173498, W: 1712 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 16, R: 6, W: 7 }, timeAcquiringMicros: { w: 105884, R: 32915, W: 6627 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 576ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.044-0400 m31100| 2015-07-09T14:15:52.043-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_267
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.071-0400 m31100| 2015-07-09T14:15:52.071-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.090-0400 m31100| 2015-07-09T14:15:52.090-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.091-0400 m31102| 2015-07-09T14:15:52.091-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.092-0400 m31101| 2015-07-09T14:15:52.092-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mr.coll61_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.096-0400 m31100| 2015-07-09T14:15:52.096-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.097-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.097-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.097-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.097-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.097-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.098-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465750_70", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465750_70", timeMillis: 694, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|244, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465750_70", timeMillis: 288, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|65, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { w: 18, W: 20 }, timeAcquiringMicros: { w: 67846, W: 144333 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 6, W: 3 }, timeAcquiringMicros: { r: 15658, w: 29006, W: 24202 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 388ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.098-0400 m31100| 2015-07-09T14:15:52.097-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.098-0400 m31200| 2015-07-09T14:15:52.098-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.101-0400 m31100| 2015-07-09T14:15:52.100-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.101-0400 m31101| 2015-07-09T14:15:52.101-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.102-0400 m31102| 2015-07-09T14:15:52.101-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.105-0400 m31201| 2015-07-09T14:15:52.104-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.105-0400 m31202| 2015-07-09T14:15:52.105-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465750_70
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.106-0400 m31100| 2015-07-09T14:15:52.106-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.107-0400 m31100| 2015-07-09T14:15:52.106-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.108-0400 m31100| 2015-07-09T14:15:52.108-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.110-0400 m31100| 2015-07-09T14:15:52.109-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465751_71 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.110-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.110-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.110-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.111-0400 m31100| values...., out: "tmp.mrs.coll61_1436465751_71", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 29, w: 33, W: 1 }, timeAcquiringMicros: { r: 118740, w: 122001, W: 50 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 10, w: 12, R: 7, W: 6 }, timeAcquiringMicros: { r: 58661, w: 46335, R: 18249, W: 6117 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 559ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.111-0400 m31200| 2015-07-09T14:15:52.111-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.112-0400 m31100| 2015-07-09T14:15:52.112-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_269
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.113-0400 m31100| 2015-07-09T14:15:52.113-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_268
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.226-0400 m31100| 2015-07-09T14:15:52.226-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465751_84
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.230-0400 m31100| 2015-07-09T14:15:52.230-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.230-0400 m31100| 2015-07-09T14:15:52.230-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.240-0400 m31100| 2015-07-09T14:15:52.239-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.245-0400 m31100| 2015-07-09T14:15:52.245-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465751_84 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.246-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.246-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.246-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.247-0400 m31100| values...., out: "tmp.mrs.coll61_1436465751_84", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 18, w: 32, W: 1 }, timeAcquiringMicros: { r: 69228, w: 82240, W: 307 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 10, w: 17, R: 6, W: 3 }, timeAcquiringMicros: { r: 37823, w: 126716, R: 5920, W: 1186 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 525ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.248-0400 m31100| 2015-07-09T14:15:52.248-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_270
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.259-0400 m31200| 2015-07-09T14:15:52.258-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465752_72
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.266-0400 m31200| 2015-07-09T14:15:52.264-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.266-0400 m31200| 2015-07-09T14:15:52.265-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.267-0400 m31200| 2015-07-09T14:15:52.267-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.268-0400 m31200| 2015-07-09T14:15:52.267-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465752_72 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.268-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.268-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.268-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.270-0400 m31200| values...., out: "tmp.mrs.coll61_1436465752_72", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 156ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.344-0400 m31100| 2015-07-09T14:15:52.344-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_267
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.351-0400 m31100| 2015-07-09T14:15:52.351-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_267
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.352-0400 m31100| 2015-07-09T14:15:52.352-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.352-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.353-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.353-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.353-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.353-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.355-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465751_83", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465751_83", timeMillis: 560, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|22, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465751_83", timeMillis: 160, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|90, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 6, W: 20 }, timeAcquiringMicros: { r: 1117, w: 14576, W: 91779 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 16, W: 3 }, timeAcquiringMicros: { w: 100324, W: 1649 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 308ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.355-0400 m31102| 2015-07-09T14:15:52.353-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_267
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.355-0400 m31101| 2015-07-09T14:15:52.353-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_267
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.355-0400 m31100| 2015-07-09T14:15:52.353-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.357-0400 m31200| 2015-07-09T14:15:52.357-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.361-0400 m31201| 2015-07-09T14:15:52.360-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.362-0400 m31202| 2015-07-09T14:15:52.361-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.362-0400 m31101| 2015-07-09T14:15:52.361-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.362-0400 m31102| 2015-07-09T14:15:52.361-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465751_83
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.364-0400 m31200| 2015-07-09T14:15:52.363-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.369-0400 m31100| 2015-07-09T14:15:52.369-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_271
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.469-0400 m31100| 2015-07-09T14:15:52.468-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_269
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.478-0400 m31100| 2015-07-09T14:15:52.477-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_269
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.483-0400 m31102| 2015-07-09T14:15:52.483-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mr.coll61_269
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.483-0400 m31101| 2015-07-09T14:15:52.483-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mr.coll61_269
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.485-0400 m31100| 2015-07-09T14:15:52.483-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.485-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.486-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.486-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.486-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.486-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.487-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465751_71", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465751_71", timeMillis: 556, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|51, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465751_71", timeMillis: 125, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|111, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 4, w: 18, W: 20 }, timeAcquiringMicros: { r: 9355, w: 72011, W: 97548 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 7, W: 4 }, timeAcquiringMicros: { w: 28263, W: 27701 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 371ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.488-0400 m31100| 2015-07-09T14:15:52.485-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.493-0400 m31200| 2015-07-09T14:15:52.493-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.497-0400 m31101| 2015-07-09T14:15:52.496-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.500-0400 m31102| 2015-07-09T14:15:52.497-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.508-0400 m31201| 2015-07-09T14:15:52.508-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.509-0400 m31202| 2015-07-09T14:15:52.508-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465751_71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.511-0400 m31200| 2015-07-09T14:15:52.510-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.521-0400 m31100| 2015-07-09T14:15:52.519-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_272
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.568-0400 m31200| 2015-07-09T14:15:52.568-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465752_86
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.570-0400 m31100| 2015-07-09T14:15:52.570-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465751_85
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.573-0400 m31200| 2015-07-09T14:15:52.573-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.574-0400 m31200| 2015-07-09T14:15:52.573-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.581-0400 m31100| 2015-07-09T14:15:52.581-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_266
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.581-0400 m31100| 2015-07-09T14:15:52.581-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_266
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.591-0400 m31200| 2015-07-09T14:15:52.591-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_204
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.593-0400 m31100| 2015-07-09T14:15:52.592-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_266
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.597-0400 m31200| 2015-07-09T14:15:52.595-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465752_86 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.597-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.597-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.597-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.597-0400 m31200| values...., out: "tmp.mrs.coll61_1436465752_86", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 9, R: 1, W: 3 }, timeAcquiringMicros: { w: 36591, R: 1397, W: 16949 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 232ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.607-0400 m31100| 2015-07-09T14:15:52.606-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465751_85 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.607-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.607-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.608-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.608-0400 m31100| values...., out: "tmp.mrs.coll61_1436465751_85", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 31, w: 43, W: 1 }, timeAcquiringMicros: { r: 82733, w: 203403, W: 20169 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 19, R: 11, W: 7 }, timeAcquiringMicros: { r: 25689, w: 141296, R: 23041, W: 17660 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 725ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.608-0400 m31100| 2015-07-09T14:15:52.607-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_273
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.623-0400 m31200| 2015-07-09T14:15:52.622-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465752_73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.629-0400 m31200| 2015-07-09T14:15:52.629-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.629-0400 m31200| 2015-07-09T14:15:52.629-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.632-0400 m31200| 2015-07-09T14:15:52.631-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_205
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.632-0400 m31200| 2015-07-09T14:15:52.631-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465752_73 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.632-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.632-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.632-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.633-0400 m31200| values...., out: "tmp.mrs.coll61_1436465752_73", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 4726 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { R: 8, W: 2 }, timeAcquiringMicros: { R: 8389, W: 860 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 120ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.655-0400 m31100| 2015-07-09T14:15:52.654-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_270
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.657-0400 m31100| 2015-07-09T14:15:52.657-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_270
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.659-0400 m31101| 2015-07-09T14:15:52.658-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mr.coll61_270
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.659-0400 m31102| 2015-07-09T14:15:52.659-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mr.coll61_270
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.660-0400 m31100| 2015-07-09T14:15:52.659-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.660-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.660-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.660-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.660-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.661-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.661-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465751_84", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465751_84", timeMillis: 511, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|95, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465751_84", timeMillis: 236, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465751000|136, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 8860, w: 93619, W: 178023 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 3, W: 4 }, timeAcquiringMicros: { w: 17804, W: 3981 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } },
oplog: { acquireCount: { w: 42 } } } protocol:op_command 411ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.662-0400 m31100| 2015-07-09T14:15:52.659-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465751_84 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.666-0400 m31200| 2015-07-09T14:15:52.665-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465751_84 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.667-0400 m31102| 2015-07-09T14:15:52.667-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465751_84 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.667-0400 m31101| 2015-07-09T14:15:52.667-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465751_84 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.669-0400 m31201| 2015-07-09T14:15:52.669-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465751_84 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.670-0400 m31202| 2015-07-09T14:15:52.669-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465751_84 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.674-0400 m31200| 2015-07-09T14:15:52.674-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_206 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.683-0400 m31100| 2015-07-09T14:15:52.682-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.715-0400 m31100| 2015-07-09T14:15:52.713-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465752_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.722-0400 m31100| 2015-07-09T14:15:52.721-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_268 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.722-0400 m31100| 2015-07-09T14:15:52.721-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_268 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.752-0400 m31100| 2015-07-09T14:15:52.752-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_268 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.769-0400 m31100| 2015-07-09T14:15:52.766-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465752_72 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.769-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.769-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.769-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.769-0400 m31100| values...., out: "tmp.mrs.coll61_1436465752_72", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 19, w: 33, W: 1 }, timeAcquiringMicros: { r: 83048, w: 138338, W: 187 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 10, w: 19, R: 8, W: 9 }, timeAcquiringMicros: { r: 61879, w: 72205, R: 27073, W: 68555 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 654ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.770-0400 m31100| 2015-07-09T14:15:52.767-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_275 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:52.816-0400 m31200| 2015-07-09T14:15:52.816-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.820-0400 m31200| 2015-07-09T14:15:52.819-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_206 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.820-0400 m31200| 2015-07-09T14:15:52.819-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_206 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.821-0400 m31200| 2015-07-09T14:15:52.821-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_206 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.821-0400 m31200| 2015-07-09T14:15:52.821-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465752_87 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.821-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.821-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.821-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.822-0400 m31200| values...., out: "tmp.mrs.coll61_1436465752_87", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.836-0400 m31100| 2015-07-09T14:15:52.836-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465752_86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.843-0400 m31100| 2015-07-09T14:15:52.842-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.843-0400 m31100| 2015-07-09T14:15:52.843-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.850-0400 m31100| 2015-07-09T14:15:52.849-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.855-0400 m31100| 2015-07-09T14:15:52.854-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465752_86 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.855-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.855-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.855-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.856-0400 m31100| values...., out: "tmp.mrs.coll61_1436465752_86", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 20, w: 28, W: 1 }, timeAcquiringMicros: { r: 57269, w: 108593, W: 214 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 7, w: 20, R: 7, W: 7 }, timeAcquiringMicros: { r: 26585, w: 122092, R: 6431, W: 6597 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } 
} protocol:op_query 490ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.867-0400 m31100| 2015-07-09T14:15:52.867-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_276 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.912-0400 m31100| 2015-07-09T14:15:52.912-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.914-0400 m31100| 2015-07-09T14:15:52.914-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.916-0400 m31100| 2015-07-09T14:15:52.915-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.916-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.916-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.916-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.916-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.916-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.918-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465751_85", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465751_85", timeMillis: 700, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|206, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465751_85", timeMillis: 142, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|21, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 11, W: 20 }, timeAcquiringMicros: { r: 1914, w: 26817, W: 86595 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 2, w: 15, W: 2 }, timeAcquiringMicros: { r: 13501, w: 90877, W: 70 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 308ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.918-0400 m31100| 2015-07-09T14:15:52.916-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465751_85 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.918-0400 m31102| 2015-07-09T14:15:52.917-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.919-0400 m31101| 2015-07-09T14:15:52.918-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mr.coll61_273 
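[annotation] The records above repeat the two-phase pattern this workload drives: each shard primary (m31100, m31200) first runs mapReduce with shardedFirstPass: true, materializing a per-shard db61.tmp.mrs.coll61_* collection, and the output shard then runs mapreduce.shardedfinish to fold those partial results into db61.map_reduce_reduce_nonatomic with out: { reduce: ..., nonAtomic: true }. The bursts of "CMD: drop db61.tmp.mr.*" / "tmp.mrs.*" lines are the cleanup of those temporaries, and the matching drops on m31101/m31102 and m31201/m31202 are the same drops being applied by the secondaries' repl writer workers. A minimal shell-side sketch of the call that produces this server-side sequence follows; the actual map/reduce bodies are truncated in the log ("this.has..."), so the placeholder implementations here are assumptions, not the test's real functions:

    // Hypothetical reconstruction -- the real mapper/reducer bodies are elided in the log.
    var mapper = function() {
        // assumed shape: emit a key/value pair derived from each document
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, { count: 1 });  // placeholder payload
        }
    };
    var reducer = function(key, values) {
        var res = { count: 0 };
        values.forEach(function(v) { res.count += v.count; });
        return res;
    };
    var finalizer = function(key, reducedValue) {
        return reducedValue;  // identity finalizer, as shown in the log
    };
    db.coll61.mapReduce(mapper, reducer, {
        finalize: finalizer,
        // nonAtomic: true lets mongod yield between batches while folding the
        // per-shard results into the existing output collection, which is why
        // the shardedfinish records above report many short lock acquisitions
        // rather than one long exclusive hold.
        out: { reduce: 'map_reduce_reduce_nonatomic', nonAtomic: true }
    });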
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.920-0400 m31200| 2015-07-09T14:15:52.920-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465751_85 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.922-0400 m31101| 2015-07-09T14:15:52.922-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465751_85 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.923-0400 m31102| 2015-07-09T14:15:52.923-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465751_85 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.924-0400 m31202| 2015-07-09T14:15:52.924-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465751_85 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.924-0400 m31201| 2015-07-09T14:15:52.924-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465751_85 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.926-0400 m31200| 2015-07-09T14:15:52.926-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_207 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.931-0400 m31100| 2015-07-09T14:15:52.931-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_277 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.987-0400 m31100| 2015-07-09T14:15:52.987-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465752_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.993-0400 m31100| 2015-07-09T14:15:52.992-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_272 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:52.993-0400 m31100| 2015-07-09T14:15:52.993-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_272 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.022-0400 m31100| 2015-07-09T14:15:53.022-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_275 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.031-0400 m31100| 2015-07-09T14:15:53.031-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_272 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.036-0400 m31100| 2015-07-09T14:15:53.036-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_275 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.045-0400 m31102| 2015-07-09T14:15:53.045-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mr.coll61_275 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.046-0400 m31100| 2015-07-09T14:15:53.045-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.046-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.046-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.046-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.046-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.047-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.048-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465752_72", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465752_72", timeMillis: 610, counts: { input: 983, emit: 983, reduce: 80, 
output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|251, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465752_72", timeMillis: 154, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|44, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 4, w: 22, W: 20 }, timeAcquiringMicros: { r: 5452, w: 77039, W: 84240 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 4, W: 4 }, timeAcquiringMicros: { w: 21269, W: 3965 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 277ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.048-0400 m31101| 2015-07-09T14:15:53.045-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mr.coll61_275 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.048-0400 m31100| 2015-07-09T14:15:53.046-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465752_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.049-0400 m31200| 2015-07-09T14:15:53.049-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465752_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.050-0400 m31100| 2015-07-09T14:15:53.049-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465752_73 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.050-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.050-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.050-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.050-0400 m31100| values...., out: "tmp.mrs.coll61_1436465752_73", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 31, w: 37, W: 1 }, timeAcquiringMicros: { r: 96308, w: 162160, W: 7161 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 11, R: 11, W: 5 }, timeAcquiringMicros: { r: 20271, w: 70523, R: 20190, W: 2545 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 538ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.051-0400 m31100| 2015-07-09T14:15:53.051-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_278 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.053-0400 m31101| 2015-07-09T14:15:53.053-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465752_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.053-0400 m31102| 2015-07-09T14:15:53.053-0400 I COMMAND [repl writer worker 8] CMD: drop 
db61.tmp.mrs.coll61_1436465752_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.053-0400 m31202| 2015-07-09T14:15:53.053-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465752_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.054-0400 m31201| 2015-07-09T14:15:53.054-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mrs.coll61_1436465752_72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.058-0400 m31200| 2015-07-09T14:15:53.058-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.062-0400 m31200| 2015-07-09T14:15:53.061-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_207 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.062-0400 m31200| 2015-07-09T14:15:53.062-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_207 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.062-0400 m31200| 2015-07-09T14:15:53.062-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_208 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.063-0400 m31200| 2015-07-09T14:15:53.063-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_207 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.070-0400 m31100| 2015-07-09T14:15:53.069-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_279 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.090-0400 m31200| 2015-07-09T14:15:53.090-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465752_88 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.091-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.091-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.091-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.094-0400 m31200| values...., out: "tmp.mrs.coll61_1436465752_88", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 1, W: 3 }, timeAcquiringMicros: { w: 2613, W: 27184 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 164ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.178-0400 m31200| 2015-07-09T14:15:53.177-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.184-0400 m31200| 2015-07-09T14:15:53.184-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_208 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.185-0400 m31200| 2015-07-09T14:15:53.184-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_208 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.186-0400 m31200| 2015-07-09T14:15:53.186-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_208 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.186-0400 m31200| 2015-07-09T14:15:53.186-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465753_74 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.187-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.187-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.187-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.188-0400 m31200| values...., out: "tmp.mrs.coll61_1436465753_74", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 2636 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 40, W: 294 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.210-0400 m31100| 2015-07-09T14:15:53.209-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_276 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.212-0400 m31100| 2015-07-09T14:15:53.211-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_276 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.213-0400 m31100| 2015-07-09T14:15:53.212-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.213-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.213-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.213-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.214-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.214-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.215-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465752_86", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465752_86", timeMillis: 478, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|289, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465752_86", timeMillis: 210, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|69, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 15756, w: 48951, W: 192607 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 3, W: 3 }, timeAcquiringMicros: { w: 2514, W: 1852 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 
42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 357ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.215-0400 m31102| 2015-07-09T14:15:53.213-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mr.coll61_276 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.215-0400 m31100| 2015-07-09T14:15:53.213-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465752_86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.215-0400 m31200| 2015-07-09T14:15:53.215-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465752_86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.216-0400 m31101| 2015-07-09T14:15:53.216-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_276 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.219-0400 m31102| 2015-07-09T14:15:53.218-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465752_86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.219-0400 m31201| 2015-07-09T14:15:53.218-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465752_86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.219-0400 m31202| 2015-07-09T14:15:53.219-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465752_86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.222-0400 m31101| 2015-07-09T14:15:53.222-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465752_86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.223-0400 m31200| 2015-07-09T14:15:53.223-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_209 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.229-0400 m31100| 2015-07-09T14:15:53.229-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_280 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.236-0400 m31100| 2015-07-09T14:15:53.235-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.241-0400 m31100| 2015-07-09T14:15:53.240-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.241-0400 m31100| 2015-07-09T14:15:53.241-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.242-0400 m31100| 2015-07-09T14:15:53.242-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.275-0400 m31100| 2015-07-09T14:15:53.274-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465752_87 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.275-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.275-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.275-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.276-0400 m31100| values...., out: "tmp.mrs.coll61_1436465752_87", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 23, w: 37, W: 1 }, timeAcquiringMicros: { r: 107393, w: 138626, W: 6822 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 17, R: 11, W: 8 }, timeAcquiringMicros: { r: 3317, w: 96670, R: 25901, W: 44311 } }, 
Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 600ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.277-0400 m31100| 2015-07-09T14:15:53.277-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_281 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.330-0400 m31200| 2015-07-09T14:15:53.329-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.335-0400 m31200| 2015-07-09T14:15:53.334-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_209 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.336-0400 m31200| 2015-07-09T14:15:53.335-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_209 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.338-0400 m31200| 2015-07-09T14:15:53.338-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_209 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.338-0400 m31200| 2015-07-09T14:15:53.338-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465753_89 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.339-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.339-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.339-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.339-0400 m31200| values...., out: "tmp.mrs.coll61_1436465753_89", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.430-0400 m31100| 2015-07-09T14:15:53.429-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_278 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.440-0400 m31100| 2015-07-09T14:15:53.440-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_278 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.441-0400 m31101| 2015-07-09T14:15:53.440-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_278 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.441-0400 m31102| 2015-07-09T14:15:53.440-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mr.coll61_278 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.453-0400 m31100| 2015-07-09T14:15:53.453-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.456-0400 m31100| 2015-07-09T14:15:53.455-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.456-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.457-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.457-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.457-0400 m31100| values...., finalize: function finalizer(key, 
reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.457-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.458-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465752_73", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465752_73", timeMillis: 482, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|376, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465752_73", timeMillis: 118, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|90, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 20, W: 20 }, timeAcquiringMicros: { r: 1748, w: 79766, W: 125379 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 3, w: 10, W: 2 }, timeAcquiringMicros: { r: 29440, w: 53729, W: 10448 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 405ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.458-0400 m31100| 2015-07-09T14:15:53.456-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465752_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.458-0400 m31100| 2015-07-09T14:15:53.457-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_277 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.458-0400 m31100| 2015-07-09T14:15:53.457-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_277 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.467-0400 m31200| 2015-07-09T14:15:53.467-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465752_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.470-0400 m31102| 2015-07-09T14:15:53.470-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465752_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.471-0400 m31101| 2015-07-09T14:15:53.470-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465752_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.471-0400 m31202| 2015-07-09T14:15:53.471-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mrs.coll61_1436465752_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.472-0400 m31100| 2015-07-09T14:15:53.472-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_277 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.472-0400 m31201| 2015-07-09T14:15:53.472-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465752_73 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.474-0400 m31100| 2015-07-09T14:15:53.474-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465752_88 command: mapReduce { mapreduce: "coll61", map: function 
mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.475-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.475-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.475-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.476-0400 m31100| values...., out: "tmp.mrs.coll61_1436465752_88", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 35, w: 38, W: 1 }, timeAcquiringMicros: { r: 135809, w: 84827, W: 1178 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 18, R: 3, W: 7 }, timeAcquiringMicros: { r: 21177, w: 111252, R: 15608, W: 13254 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 548ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.476-0400 m31200| 2015-07-09T14:15:53.475-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_210 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.476-0400 m31100| 2015-07-09T14:15:53.475-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_282 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.504-0400 m31100| 2015-07-09T14:15:53.503-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_283 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.548-0400 m31100| 2015-07-09T14:15:53.547-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_281 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.558-0400 m31100| 2015-07-09T14:15:53.558-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_281 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.561-0400 m31101| 2015-07-09T14:15:53.560-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mr.coll61_281 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.564-0400 m31102| 2015-07-09T14:15:53.564-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mr.coll61_281 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.583-0400 m31100| 2015-07-09T14:15:53.582-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.583-0400 m31100| 2015-07-09T14:15:53.582-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.584-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.584-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.584-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.584-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.585-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.586-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465752_87", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465752_87", timeMillis: 567, counts: { input: 983, 
emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|68, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465752_87", timeMillis: 145, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465752000|113, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 4482, w: 69136, W: 88467 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 3, W: 3 }, timeAcquiringMicros: { w: 22122, W: 19271 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 306ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.587-0400 m31100| 2015-07-09T14:15:53.583-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.587-0400 m31100| 2015-07-09T14:15:53.585-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_279 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.587-0400 m31100| 2015-07-09T14:15:53.585-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_279 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.592-0400 m31200| 2015-07-09T14:15:53.591-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.593-0400 m31100| 2015-07-09T14:15:53.592-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_279 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.595-0400 m31100| 2015-07-09T14:15:53.594-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465753_74 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.596-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.596-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.597-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.598-0400 m31100| values...., out: "tmp.mrs.coll61_1436465753_74", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 27, w: 42, W: 1 }, timeAcquiringMicros: { r: 65447, w: 136479, W: 1402 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 5, w: 16, R: 7, W: 6 }, timeAcquiringMicros: { r: 10281, w: 112691, R: 28071, W: 24072 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 535ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.599-0400 m31100| 2015-07-09T14:15:53.596-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_284 
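[annotation] Each slow-operation record above carries a locks: breakdown per resource (Global, Database, Collection, Metadata, oplog): acquireCount is how many times each mode (r/w intent, R/W exclusive) was taken, acquireWaitCount is how many of those acquisitions blocked, and timeAcquiringMicros is the total time spent blocked, in microseconds. A small hypothetical helper (the name and shape are illustrative, not part of the test harness) shows how these fields combine into a contention summary:

    // Hypothetical helper: summarize lock contention from a parsed locks:{} document.
    function lockWaitSummary(locks) {
        var out = {};
        Object.keys(locks).forEach(function(resource) {
            var waits = locks[resource].timeAcquiringMicros || {};
            var totalMicros = Object.keys(waits).reduce(function(acc, mode) {
                return acc + waits[mode];
            }, 0);
            out[resource] = totalMicros / 1000;  // ms spent waiting on this resource
        });
        return out;
    }
    // e.g. for the 306ms shardedfinish above, the Global lock waits alone
    // account for (4482 + 69136 + 88467) us, i.e. roughly 162 ms of the
    // reported latency -- most of it in the exclusive (W) mode acquisitions.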
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.600-0400 m31101| 2015-07-09T14:15:53.599-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.606-0400 m31201| 2015-07-09T14:15:53.606-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.608-0400 m31202| 2015-07-09T14:15:53.607-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.608-0400 m31102| 2015-07-09T14:15:53.607-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465752_87 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.612-0400 m31200| 2015-07-09T14:15:53.612-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_211 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.669-0400 m31100| 2015-07-09T14:15:53.669-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_285 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.687-0400 m31100| 2015-07-09T14:15:53.686-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.691-0400 m31100| 2015-07-09T14:15:53.690-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_280 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.691-0400 m31100| 2015-07-09T14:15:53.691-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_280 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.694-0400 m31200| 2015-07-09T14:15:53.693-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465753_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.700-0400 m31200| 2015-07-09T14:15:53.700-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_210 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.701-0400 m31200| 2015-07-09T14:15:53.700-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_210 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.701-0400 m31200| 2015-07-09T14:15:53.701-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_210 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.713-0400 m31200| 2015-07-09T14:15:53.712-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465753_75 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.713-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.713-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.713-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.714-0400 m31200| values...., out: "tmp.mrs.coll61_1436465753_75", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 9, R: 1, W: 2 }, timeAcquiringMicros: { w: 57118, R: 4146, W: 11291 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 238ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.715-0400 m31100| 2015-07-09T14:15:53.715-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_280 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.732-0400 m31100| 
2015-07-09T14:15:53.732-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465753_89 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.733-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.733-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.733-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.733-0400 m31100| values...., out: "tmp.mrs.coll61_1436465753_89", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 14, w: 27, W: 1 }, timeAcquiringMicros: { r: 25403, w: 108691, W: 1334 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 15, R: 7, W: 8 }, timeAcquiringMicros: { r: 39733, w: 93438, R: 13271, W: 23490 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 509ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.733-0400 m31100| 2015-07-09T14:15:53.733-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_286 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.752-0400 m31200| 2015-07-09T14:15:53.751-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465753_90 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.759-0400 m31200| 2015-07-09T14:15:53.759-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_211 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.759-0400 m31200| 2015-07-09T14:15:53.759-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_211 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.760-0400 m31200| 2015-07-09T14:15:53.760-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_211 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.761-0400 m31200| 2015-07-09T14:15:53.760-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465753_90 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.761-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.761-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.762-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.763-0400 m31200| values...., out: "tmp.mrs.coll61_1436465753_90", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 839 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { R: 9, W: 2 }, timeAcquiringMicros: { R: 7682, W: 298 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.823-0400 m31100| 2015-07-09T14:15:53.823-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_282 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.830-0400 m31100| 2015-07-09T14:15:53.830-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_282 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:53.831-0400 m31102| 2015-07-09T14:15:53.831-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mr.coll61_282 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.831-0400 m31101| 2015-07-09T14:15:53.831-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mr.coll61_282 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.847-0400 m31100| 2015-07-09T14:15:53.846-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.847-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.847-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.847-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.847-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.847-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.849-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465752_88", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465752_88", timeMillis: 531, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|150, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465752_88", timeMillis: 136, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|22, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 12, W: 20 }, timeAcquiringMicros: { r: 1221, w: 32524, W: 136993 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 11, W: 3 }, timeAcquiringMicros: { w: 90999, W: 779 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 371ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.849-0400 m31100| 2015-07-09T14:15:53.847-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.852-0400 m31200| 2015-07-09T14:15:53.851-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.855-0400 m31101| 2015-07-09T14:15:53.855-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.855-0400 m31202| 2015-07-09T14:15:53.855-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:53.856-0400 m31201| 2015-07-09T14:15:53.855-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.856-0400 m31102| 2015-07-09T14:15:53.855-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mrs.coll61_1436465752_88 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.858-0400 m31200| 2015-07-09T14:15:53.858-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_212 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.907-0400 m31100| 2015-07-09T14:15:53.907-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_284 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.910-0400 m31100| 2015-07-09T14:15:53.910-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_287 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.911-0400 m31100| 2015-07-09T14:15:53.911-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_284 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.914-0400 m31101| 2015-07-09T14:15:53.914-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mr.coll61_284 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.915-0400 m31102| 2015-07-09T14:15:53.914-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mr.coll61_284 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.923-0400 m31100| 2015-07-09T14:15:53.922-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.923-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.923-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.923-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.923-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.924-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.925-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465753_74", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465753_74", timeMillis: 526, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|186, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465753_74", timeMillis: 125, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|44, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 4, w: 21, W: 20 }, timeAcquiringMicros: { r: 4582, w: 92240, W: 60783 } }, Database: { acquireCount: { r: 4, w: 64, 
W: 4 }, acquireWaitCount: { w: 8, W: 3 }, timeAcquiringMicros: { w: 56557, W: 16766 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 326ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.925-0400 m31100| 2015-07-09T14:15:53.923-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.927-0400 m31200| 2015-07-09T14:15:53.927-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.929-0400 m31101| 2015-07-09T14:15:53.929-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.931-0400 m31102| 2015-07-09T14:15:53.931-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.934-0400 m31202| 2015-07-09T14:15:53.934-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.935-0400 m31201| 2015-07-09T14:15:53.934-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465753_74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.945-0400 m31200| 2015-07-09T14:15:53.944-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_213 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:53.977-0400 m31100| 2015-07-09T14:15:53.976-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.031-0400 m31100| 2015-07-09T14:15:54.030-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_286 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.032-0400 m31200| 2015-07-09T14:15:54.031-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.038-0400 m31200| 2015-07-09T14:15:54.038-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_212 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.039-0400 m31200| 2015-07-09T14:15:54.039-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_212 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.051-0400 m31100| 2015-07-09T14:15:54.049-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_286 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.051-0400 m31200| 2015-07-09T14:15:54.049-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_212 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.052-0400 m31102| 2015-07-09T14:15:54.052-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mr.coll61_286 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.058-0400 m31101| 2015-07-09T14:15:54.057-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mr.coll61_286 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.071-0400 m31100| 2015-07-09T14:15:54.071-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.072-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.072-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.072-0400 m31100| [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:54.072-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.072-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.074-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465753_89", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465753_89", timeMillis: 467, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|230, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465753_89", timeMillis: 111, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|67, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 4131, w: 58616, W: 121118 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 6, W: 3 }, timeAcquiringMicros: { w: 23565, W: 34062 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 337ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.074-0400 m31200| 2015-07-09T14:15:54.071-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465753_91 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.074-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.075-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.075-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.075-0400 m31200| values...., out: "tmp.mrs.coll61_1436465753_91", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 7, R: 3, W: 3 }, timeAcquiringMicros: { w: 42053, R: 2968, W: 11690 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 214ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.076-0400 m31100| 2015-07-09T14:15:54.076-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.079-0400 m31200| 2015-07-09T14:15:54.079-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.081-0400 m31100| 2015-07-09T14:15:54.080-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465753_75 
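Annotation: the entries above show the first phase of each sharded mapReduce round. mongos sends the command to every shard with shardedFirstPass: true, and each shard writes its partial output to a per-invocation temp collection (tmp.mrs.coll61_<epoch>_<n>). The server truncates the function bodies when it logs the command, so the sketch below fills them in with hypothetical bodies inferred only from the visible fragments ("this.hasOwnProperty('key') && this.has...", "var res = {};", "return reducedValue;") and from the counts (emit equals input; about 20 output keys per shard):

```js
// Hypothetical reconstruction: mongod truncates the logged command, so these
// bodies are inferred from the visible fragments and counts, not copied from
// the workload source.
function mapper() {
    // visible fragment: "if (this.hasOwnProperty('key') && this.has..."
    if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
        emit(this.key, {count: 1});  // one emit per input doc (emit == input above)
    }
}

function reducer(key, values) {
    var res = {};  // visible fragment: "var res = {};"
    values.forEach(function(v) {
        Object.keys(v).forEach(function(k) {
            res[k] = (res.hasOwnProperty(k) ? res[k] : 0) + v[k];
        });
    });
    return res;
}

function finalizer(key, reducedValue) {
    return reducedValue;  // logged verbatim in the shardedfinish entries
}
```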
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.081-0400 m31101| 2015-07-09T14:15:54.081-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.086-0400 m31202| 2015-07-09T14:15:54.084-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.086-0400 m31100| 2015-07-09T14:15:54.085-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_283 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.087-0400 m31201| 2015-07-09T14:15:54.085-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.087-0400 m31100| 2015-07-09T14:15:54.086-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_283 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.088-0400 m31102| 2015-07-09T14:15:54.088-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465753_89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.090-0400 m31200| 2015-07-09T14:15:54.088-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_214 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.108-0400 m31100| 2015-07-09T14:15:54.106-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_283 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.115-0400 m31100| 2015-07-09T14:15:54.115-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465753_75 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.115-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.116-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.116-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.116-0400 m31100| values...., out: "tmp.mrs.coll61_1436465753_75", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 24, w: 41, W: 1 }, timeAcquiringMicros: { r: 98053, w: 149146, W: 97 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 4, w: 16, R: 9, W: 5 }, timeAcquiringMicros: { r: 19947, w: 94371, R: 41318, W: 25112 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 639ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.120-0400 m31100| 2015-07-09T14:15:54.119-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465753_90 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.125-0400 m31100| 2015-07-09T14:15:54.125-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.128-0400 m31100| 2015-07-09T14:15:54.127-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_285 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.129-0400 m31100| 2015-07-09T14:15:54.129-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_285 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.132-0400 m31100| 2015-07-09T14:15:54.129-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.146-0400 m31100| 2015-07-09T14:15:54.146-0400 I COMMAND [conn186] CMD: 
drop db61.tmp.mr.coll61_285 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.153-0400 m31200| 2015-07-09T14:15:54.152-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.161-0400 m31200| 2015-07-09T14:15:54.159-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_213 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.161-0400 m31200| 2015-07-09T14:15:54.160-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_213 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.161-0400 m31200| 2015-07-09T14:15:54.160-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_213 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.173-0400 m31200| 2015-07-09T14:15:54.173-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465753_76 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.174-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.174-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.174-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.175-0400 m31200| values...., out: "tmp.mrs.coll61_1436465753_76", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1398 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 12, R: 8, W: 3 }, timeAcquiringMicros: { r: 11393, w: 49338, R: 4535, W: 13083 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 228ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.198-0400 m31100| 2015-07-09T14:15:54.198-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465753_90 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.198-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.198-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.198-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.199-0400 m31100| values...., out: "tmp.mrs.coll61_1436465753_90", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 13, w: 22 }, timeAcquiringMicros: { r: 23543, w: 131559 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 14, R: 7, W: 6 }, timeAcquiringMicros: { r: 18359, w: 88582, R: 43151, W: 78009 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 587ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.200-0400 m31100| 2015-07-09T14:15:54.199-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.208-0400 m31200| 2015-07-09T14:15:54.208-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465754_92 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:54.218-0400 m31200| 2015-07-09T14:15:54.214-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_214 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.218-0400 m31200| 2015-07-09T14:15:54.214-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_214 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.219-0400 m31200| 2015-07-09T14:15:54.215-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_214 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.219-0400 m31200| 2015-07-09T14:15:54.215-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465754_92 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.219-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.219-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.219-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.220-0400 m31200| values...., out: "tmp.mrs.coll61_1436465754_92", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 4340 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { R: 9, W: 5 }, timeAcquiringMicros: { R: 4198, W: 1288 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.288-0400 m31100| 2015-07-09T14:15:54.287-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.293-0400 m31100| 2015-07-09T14:15:54.293-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_287 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.293-0400 m31100| 2015-07-09T14:15:54.293-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_287 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.294-0400 m31100| 2015-07-09T14:15:54.294-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_287 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.305-0400 m31100| 2015-07-09T14:15:54.305-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465753_91 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.305-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.305-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.305-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.306-0400 m31100| values...., out: "tmp.mrs.coll61_1436465753_91", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 10, w: 6, W: 1 }, timeAcquiringMicros: { r: 42691, w: 11062, W: 167 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 15, R: 7, W: 5 }, timeAcquiringMicros: { w: 131071, R: 39167, W: 11614 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 447ms 
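Annotation: entries like the 447ms one above are logged because each operation exceeded mongod's slow-operation threshold (slowms, 100 ms by default). timeAcquiringMicros is reported in microseconds, so in that entry the Database lock waits alone (131071 + 39167 + 11614 us, about 182 ms) account for roughly 40% of the reported latency, which is expected with many concurrent FSM threads running mapReduce against the same database. A minimal sketch of how one could raise the threshold on a node to quiet all but the worst offenders (the 500 ms value is an arbitrary illustration, not something this test sets):

```js
// Keep the profiler off (level 0) but only log operations slower than 500 ms.
// setProfilingLevel(level, slowms) is the stock shell helper; 500 is an
// illustrative value, not a setting used by this suite.
db.getSiblingDB('db61').setProfilingLevel(0, 500);
```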
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.306-0400 m31100| 2015-07-09T14:15:54.306-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.339-0400 m31100| 2015-07-09T14:15:54.339-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.348-0400 m31100| 2015-07-09T14:15:54.347-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.348-0400 m31100| 2015-07-09T14:15:54.347-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.353-0400 m31100| 2015-07-09T14:15:54.352-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.356-0400 m31100| 2015-07-09T14:15:54.355-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465753_76 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.356-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.357-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.357-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.357-0400 m31100| values...., out: "tmp.mrs.coll61_1436465753_76", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 9, w: 12, W: 1 }, timeAcquiringMicros: { r: 28632, w: 23105, W: 536 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 11, R: 9, W: 7 }, timeAcquiringMicros: { r: 986, w: 86646, R: 33448, W: 8316 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 409ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.358-0400 m31100| 2015-07-09T14:15:54.358-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.401-0400 m31100| 2015-07-09T14:15:54.401-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.406-0400 m31100| 2015-07-09T14:15:54.406-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.409-0400 m31101| 2015-07-09T14:15:54.409-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mr.coll61_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.409-0400 m31100| 2015-07-09T14:15:54.408-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.409-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.409-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.410-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.410-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.410-0400 m31100| return reducedValue; 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.411-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465753_75", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465753_75", timeMillis: 610, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|18, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465753_75", timeMillis: 225, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|92, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 4, W: 20 }, timeAcquiringMicros: { r: 1616, w: 11161, W: 69109 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 3, w: 11, W: 3 }, timeAcquiringMicros: { r: 5434, w: 78129, W: 12189 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 291ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.411-0400 m31100| 2015-07-09T14:15:54.409-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465753_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.412-0400 m31102| 2015-07-09T14:15:54.409-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mr.coll61_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.424-0400 m31200| 2015-07-09T14:15:54.424-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465753_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.429-0400 m31202| 2015-07-09T14:15:54.429-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465753_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.430-0400 m31201| 2015-07-09T14:15:54.429-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465753_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.436-0400 m31200| 2015-07-09T14:15:54.436-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_215 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.438-0400 m31100| 2015-07-09T14:15:54.437-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.439-0400 m31100| 2015-07-09T14:15:54.438-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.439-0400 m31102| 2015-07-09T14:15:54.438-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mrs.coll61_1436465753_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.440-0400 m31101| 2015-07-09T14:15:54.440-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465753_75 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.445-0400 m31100| 2015-07-09T14:15:54.444-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_291 
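Annotation: the mapreduce.shardedfinish entries are the second phase. mongos has one shard (here m31100) fetch the per-shard temp outputs named in shardedOutputCollection and reduce them into the target collection; out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } merges new results into the existing collection, and nonAtomic lets the output phase yield periodically instead of holding a single exclusive lock for its whole duration. A minimal sketch of the client-side call that drives this two-phase trace, assuming the mapper/reducer/finalizer reconstructed earlier:

```js
// Sketch only: database, collection, and output names come from the log; the
// function bodies are the hypothetical reconstructions shown above.
var res = db.getSiblingDB('db61').coll61.mapReduce(mapper, reducer, {
    finalize: finalizer,
    out: {reduce: 'map_reduce_reduce_nonatomic', nonAtomic: true}
});
printjson(res.counts);  // e.g. { input: 2000, emit: 2000, reduce: 160, output: 40 }
```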
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.446-0400 m31102| 2015-07-09T14:15:54.446-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mr.coll61_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.447-0400 m31101| 2015-07-09T14:15:54.447-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mr.coll61_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.449-0400 m31100| 2015-07-09T14:15:54.447-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.449-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.449-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.449-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.450-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.450-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.453-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465753_90", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465753_90", timeMillis: 518, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|34, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465753_90", timeMillis: 147, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465753000|113, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 7, W: 20 }, timeAcquiringMicros: { r: 13131, w: 18806, W: 48615 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 11, W: 4 }, timeAcquiringMicros: { r: 1693, w: 62730, W: 8895 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 248ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.453-0400 m31100| 2015-07-09T14:15:54.449-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465753_90 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.474-0400 m31200| 2015-07-09T14:15:54.473-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465753_90 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.474-0400 m31101| 2015-07-09T14:15:54.474-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mrs.coll61_1436465753_90 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.475-0400 m31102| 2015-07-09T14:15:54.474-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465753_90 
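Annotation: every drop issued on a primary (m31100, m31200) reappears a few milliseconds later on its secondaries (m31101/m31102 and m31201/m31202) under a "repl writer worker" thread: temp-collection drops replicate through the oplog like any other write, so cleanup lands on all six data-bearing nodes. A hypothetical post-run check that cleanup completed (nothing in the test itself does this; the regex simply matches the tmp.mr.* / tmp.mrs.* names seen above):

```js
// List any mapReduce temp collections left behind in db61 on the connected
// node; once all shardedfinish cleanup has replicated this should print [].
var leftovers = db.getSiblingDB('db61').getCollectionNames().filter(function(name) {
    return /^tmp\.mrs?\./.test(name);  // matches both tmp.mr.* and tmp.mrs.*
});
printjson(leftovers);
```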
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.482-0400 m31202| 2015-07-09T14:15:54.482-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mrs.coll61_1436465753_90 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.486-0400 m31201| 2015-07-09T14:15:54.485-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465753_90 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.499-0400 m31200| 2015-07-09T14:15:54.499-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_216 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.506-0400 m31100| 2015-07-09T14:15:54.506-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_295 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.597-0400 m31200| 2015-07-09T14:15:54.597-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465754_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.600-0400 m31200| 2015-07-09T14:15:54.600-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_215 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.601-0400 m31200| 2015-07-09T14:15:54.600-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_215 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.613-0400 m31200| 2015-07-09T14:15:54.613-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_215 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.618-0400 m31200| 2015-07-09T14:15:54.617-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465754_77 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.618-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.618-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.618-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.619-0400 m31200| values...., out: "tmp.mrs.coll61_1436465754_77", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 5535 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 3, R: 4, W: 3 }, timeAcquiringMicros: { w: 17567, R: 8016, W: 16046 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.648-0400 m31200| 2015-07-09T14:15:54.648-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465754_93 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.655-0400 m31200| 2015-07-09T14:15:54.655-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_216 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.655-0400 m31200| 2015-07-09T14:15:54.655-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_216 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.656-0400 m31200| 2015-07-09T14:15:54.656-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_216 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.671-0400 m31200| 2015-07-09T14:15:54.671-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465754_93 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.672-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, 
values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.672-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.672-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.673-0400 m31200| values...., out: "tmp.mrs.coll61_1436465754_93", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 4901 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 2, R: 5, W: 2 }, timeAcquiringMicros: { w: 7856, R: 1663, W: 9778 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 172ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.680-0400 m31100| 2015-07-09T14:15:54.679-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.692-0400 m31100| 2015-07-09T14:15:54.692-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.693-0400 m31102| 2015-07-09T14:15:54.692-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mr.coll61_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.693-0400 m31101| 2015-07-09T14:15:54.693-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.698-0400 m31100| 2015-07-09T14:15:54.697-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.698-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.698-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.698-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.698-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.698-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.699-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465753_91", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465753_91", timeMillis: 435, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|104, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465753_91", timeMillis: 181, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|19, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 
reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 16, W: 20 }, timeAcquiringMicros: { r: 1172, w: 104397, W: 109499 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 2, w: 11, W: 1 }, timeAcquiringMicros: { r: 3956, w: 72175, W: 193 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 391ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.699-0400 m31100| 2015-07-09T14:15:54.698-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.703-0400 m31200| 2015-07-09T14:15:54.703-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.706-0400 m31102| 2015-07-09T14:15:54.706-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.707-0400 m31201| 2015-07-09T14:15:54.706-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.707-0400 m31101| 2015-07-09T14:15:54.706-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.707-0400 m31202| 2015-07-09T14:15:54.706-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465753_91 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.709-0400 m31200| 2015-07-09T14:15:54.708-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_217 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.712-0400 m31100| 2015-07-09T14:15:54.712-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_296 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.784-0400 m31100| 2015-07-09T14:15:54.784-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465754_92 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.787-0400 m31100| 2015-07-09T14:15:54.787-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.788-0400 m31100| 2015-07-09T14:15:54.787-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.789-0400 m31100| 2015-07-09T14:15:54.788-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.800-0400 m31100| 2015-07-09T14:15:54.800-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.801-0400 m31102| 2015-07-09T14:15:54.800-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mr.coll61_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.801-0400 m31101| 2015-07-09T14:15:54.801-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mr.coll61_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.805-0400 m31100| 2015-07-09T14:15:54.805-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.806-0400 m31100| 2015-07-09T14:15:54.805-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.806-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) 
{ [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.806-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.806-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.807-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.807-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.808-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465753_76", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465753_76", timeMillis: 401, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|126, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465753_76", timeMillis: 215, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|42, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 14, W: 20 }, timeAcquiringMicros: { r: 6634, w: 107251, W: 117191 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 16, W: 4 }, timeAcquiringMicros: { w: 116806, W: 12656 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 448ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.808-0400 m31100| 2015-07-09T14:15:54.806-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.812-0400 m31200| 2015-07-09T14:15:54.811-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.812-0400 m31100| 2015-07-09T14:15:54.811-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465754_92 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.813-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.813-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.813-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.813-0400 m31100| values...., out: "tmp.mrs.coll61_1436465754_92", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 21, w: 30, W: 1 }, timeAcquiringMicros: { r: 85017, w: 188503, W: 2190 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 7, w: 21, R: 10, W: 9 }, timeAcquiringMicros: { r: 39513, w: 138163, R: 32028, W: 63967 } }, Collection: { 
acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 724ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.813-0400 m31100| 2015-07-09T14:15:54.812-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_297 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.814-0400 m31101| 2015-07-09T14:15:54.813-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.815-0400 m31201| 2015-07-09T14:15:54.815-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.820-0400 m31102| 2015-07-09T14:15:54.819-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.820-0400 m31202| 2015-07-09T14:15:54.819-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465753_76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.823-0400 m31200| 2015-07-09T14:15:54.823-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.824-0400 m31100| 2015-07-09T14:15:54.824-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_298 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.899-0400 m31200| 2015-07-09T14:15:54.899-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465754_94 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.906-0400 m31200| 2015-07-09T14:15:54.905-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_217 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.906-0400 m31200| 2015-07-09T14:15:54.906-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_217 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.910-0400 m31200| 2015-07-09T14:15:54.910-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_217 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.925-0400 m31200| 2015-07-09T14:15:54.924-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465754_94 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.925-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.925-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.926-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.927-0400 m31200| values...., out: "tmp.mrs.coll61_1436465754_94", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 4, R: 2, W: 2 }, timeAcquiringMicros: { w: 20581, R: 22662, W: 13675 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 216ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.945-0400 m31100| 2015-07-09T14:15:54.945-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465754_77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.950-0400 m31100| 2015-07-09T14:15:54.950-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.951-0400 m31100| 
2015-07-09T14:15:54.950-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.963-0400 m31100| 2015-07-09T14:15:54.962-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.966-0400 m31200| 2015-07-09T14:15:54.966-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465754_78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.974-0400 m31200| 2015-07-09T14:15:54.974-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.975-0400 m31200| 2015-07-09T14:15:54.974-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.977-0400 m31100| 2015-07-09T14:15:54.975-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465754_77 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.978-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.978-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.978-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.979-0400 m31100| values...., out: "tmp.mrs.coll61_1436465754_77", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 8, w: 21, W: 1 }, timeAcquiringMicros: { r: 16938, w: 93346, W: 2036 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 9, w: 21, R: 11, W: 9 }, timeAcquiringMicros: { r: 58432, w: 120394, R: 34273, W: 38521 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 539ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.980-0400 m31100| 2015-07-09T14:15:54.977-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_299 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.980-0400 m31200| 2015-07-09T14:15:54.979-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_218 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.981-0400 m31200| 2015-07-09T14:15:54.980-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465754_78 command: mapReduce { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.981-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.981-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.981-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:54.982-0400 m31200| values...., out: "tmp.mrs.coll61_1436465754_78", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 1, R: 5 }, timeAcquiringMicros: { w: 3643, R: 2221 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.015-0400 m31100| 
2015-07-09T14:15:55.014-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465754_93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.019-0400 m31100| 2015-07-09T14:15:55.018-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_295
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.019-0400 m31100| 2015-07-09T14:15:55.018-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_295
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.020-0400 m31100| 2015-07-09T14:15:55.019-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_295
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.023-0400 m31100| 2015-07-09T14:15:55.022-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465754_93 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.023-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.023-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.024-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.024-0400 m31100| values...., out: "tmp.mrs.coll61_1436465754_93", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 12, w: 14, W: 1 }, timeAcquiringMicros: { r: 69675, w: 34590, W: 6573 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 24, R: 5, W: 9 }, timeAcquiringMicros: { r: 53946, w: 149714, R: 12171, W: 24454 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 524ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.026-0400 m31100| 2015-07-09T14:15:55.026-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.105-0400 m31100| 2015-07-09T14:15:55.105-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.113-0400 m31100| 2015-07-09T14:15:55.112-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.113-0400 m31101| 2015-07-09T14:15:55.113-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mr.coll61_297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.113-0400 m31102| 2015-07-09T14:15:55.113-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.115-0400 m31100| 2015-07-09T14:15:55.114-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.115-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.115-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.115-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.115-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.115-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.116-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465754_92", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465754_92", timeMillis: 700, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|270, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465754_92", timeMillis: 127, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|63, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 4, W: 20 }, timeAcquiringMicros: { r: 1353, w: 21017, W: 41558 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 19, W: 3 }, timeAcquiringMicros: { r: 134, w: 119004, W: 14458 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 302ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.116-0400 m31100| 2015-07-09T14:15:55.115-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465754_92
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.120-0400 m31200| 2015-07-09T14:15:55.119-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465754_92
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.121-0400 m31100| 2015-07-09T14:15:55.120-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465754_94
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.123-0400 m31202| 2015-07-09T14:15:55.123-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465754_92
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.123-0400 m31201| 2015-07-09T14:15:55.123-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465754_92
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.126-0400 m31100| 2015-07-09T14:15:55.126-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_296
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.126-0400 m31100| 2015-07-09T14:15:55.126-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_296
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.128-0400 m31102| 2015-07-09T14:15:55.128-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465754_92
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.129-0400 m31200| 2015-07-09T14:15:55.129-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.130-0400 m31101| 2015-07-09T14:15:55.130-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mrs.coll61_1436465754_92
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.131-0400 m31100| 2015-07-09T14:15:55.130-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_296
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.132-0400 m31100| 2015-07-09T14:15:55.132-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.135-0400 m31100| 2015-07-09T14:15:55.133-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465754_94 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.135-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.136-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.136-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.137-0400 m31100| values...., out: "tmp.mrs.coll61_1436465754_94", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 23, w: 24, W: 1 }, timeAcquiringMicros: { r: 34633, w: 84885, W: 970 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 21, R: 10, W: 6 }, timeAcquiringMicros: { r: 2430, w: 113747, R: 23709, W: 4220 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 425ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.137-0400 m31100| 2015-07-09T14:15:55.135-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.177-0400 m31100| 2015-07-09T14:15:55.177-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.198-0400 m31100| 2015-07-09T14:15:55.198-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.199-0400 m31101| 2015-07-09T14:15:55.198-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mr.coll61_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.213-0400 m31100| 2015-07-09T14:15:55.212-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.214-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.214-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.214-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.214-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.214-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.216-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465754_77", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465754_77", timeMillis: 514, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|315, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465754_77", timeMillis: 164, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|88, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 4, w: 18, W: 20 }, timeAcquiringMicros: { r: 5983, w: 46132, W: 54242 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 6, W: 4 }, timeAcquiringMicros: { w: 22991, W: 30284 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 235ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.216-0400 m31100| 2015-07-09T14:15:55.213-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465754_77
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.229-0400 m31102| 2015-07-09T14:15:55.229-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mr.coll61_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.230-0400 m31200| 2015-07-09T14:15:55.230-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465754_77
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.235-0400 m31201| 2015-07-09T14:15:55.235-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465754_77
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.236-0400 m31202| 2015-07-09T14:15:55.236-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465754_77
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.241-0400 m31101| 2015-07-09T14:15:55.240-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465754_77
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.242-0400 m31200| 2015-07-09T14:15:55.241-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_220
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.249-0400 m31100| 2015-07-09T14:15:55.248-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.269-0400 m31102| 2015-07-09T14:15:55.269-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465754_77
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.303-0400 m31200| 2015-07-09T14:15:55.302-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.310-0400 m31200| 2015-07-09T14:15:55.309-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.310-0400 m31200| 2015-07-09T14:15:55.309-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.313-0400 m31200| 2015-07-09T14:15:55.312-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_219
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.334-0400 m31200| 2015-07-09T14:15:55.333-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465755_95 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.334-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.334-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.334-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.335-0400 m31200| values...., out: "tmp.mrs.coll61_1436465755_95", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 4, R: 2, W: 2 }, timeAcquiringMicros: { w: 29043, R: 10292, W: 20557 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 204ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.385-0400 m31100| 2015-07-09T14:15:55.384-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.387-0400 m31200| 2015-07-09T14:15:55.386-0400 I COMMAND [conn35] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.392-0400 m31200| 2015-07-09T14:15:55.392-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_220
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.392-0400 m31200| 2015-07-09T14:15:55.392-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_220
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.393-0400 m31200| 2015-07-09T14:15:55.393-0400 I COMMAND [conn35] CMD: drop db61.tmp.mr.coll61_220
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.393-0400 m31200| 2015-07-09T14:15:55.393-0400 I COMMAND [conn35] command db61.tmp.mrs.coll61_1436465755_79 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.394-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.394-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.394-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.394-0400 m31200| values...., out: "tmp.mrs.coll61_1436465755_79", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 4733 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 1, R: 6, W: 1 }, timeAcquiringMicros: { w: 3848, R: 5022, W: 1223 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.395-0400 m31100| 2015-07-09T14:15:55.395-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.397-0400 m31101| 2015-07-09T14:15:55.397-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mr.coll61_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.398-0400 m31102| 2015-07-09T14:15:55.397-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mr.coll61_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.403-0400 m31100| 2015-07-09T14:15:55.402-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.403-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.403-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.403-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.404-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.404-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.405-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465754_93", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465754_93", timeMillis: 519, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|12, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465754_93", timeMillis: 156, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|109, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 2847, w: 53154, W: 203746 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 3, W: 3 }, timeAcquiringMicros: { w: 5681, W: 13609 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 379ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.405-0400 m31100| 2015-07-09T14:15:55.403-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465754_93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.407-0400 m31100| 2015-07-09T14:15:55.407-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465754_78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.412-0400 m31100| 2015-07-09T14:15:55.412-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_298
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.413-0400 m31100| 2015-07-09T14:15:55.412-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_298
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.419-0400 m31200| 2015-07-09T14:15:55.419-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465754_93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.421-0400 m31101| 2015-07-09T14:15:55.421-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465754_93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.423-0400 m31102| 2015-07-09T14:15:55.423-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465754_93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.424-0400 m31201| 2015-07-09T14:15:55.423-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465754_93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.425-0400 m31202| 2015-07-09T14:15:55.424-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465754_93
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.428-0400 m31100| 2015-07-09T14:15:55.428-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_298
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.429-0400 m31100| 2015-07-09T14:15:55.429-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465754_78 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.429-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.429-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.429-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.430-0400 m31100| values...., out: "tmp.mrs.coll61_1436465754_78", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 22, w: 39, W: 1 }, timeAcquiringMicros: { r: 72446, w: 156161, W: 4159 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 8, w: 13, R: 10, W: 9 }, timeAcquiringMicros: { r: 36248, w: 102536, R: 25870, W: 33727 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 605ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.430-0400 m31200| 2015-07-09T14:15:55.429-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_221
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.430-0400 m31100| 2015-07-09T14:15:55.430-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.431-0400 m31100| 2015-07-09T14:15:55.431-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.545-0400 m31200| 2015-07-09T14:15:55.545-0400 I COMMAND [conn36] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.550-0400 m31200| 2015-07-09T14:15:55.550-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_221
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.555-0400 m31200| 2015-07-09T14:15:55.550-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_221
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.556-0400 m31200| 2015-07-09T14:15:55.551-0400 I COMMAND [conn36] CMD: drop db61.tmp.mr.coll61_221
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.569-0400 m31200| 2015-07-09T14:15:55.568-0400 I COMMAND [conn36] command db61.tmp.mrs.coll61_1436465755_96 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.569-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.569-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.569-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.569-0400 m31200| values...., out: "tmp.mrs.coll61_1436465755_96", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 138ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.625-0400 m31100| 2015-07-09T14:15:55.624-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.630-0400 m31100| 2015-07-09T14:15:55.630-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.636-0400 m31100| 2015-07-09T14:15:55.635-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.636-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.637-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.637-0400 m31101| 2015-07-09T14:15:55.636-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.637-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.637-0400 m31102| 2015-07-09T14:15:55.636-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mr.coll61_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.637-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.638-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.639-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465754_94", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465754_94", timeMillis: 418, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|102, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465754_94", timeMillis: 197, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|134, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 21, W: 20 }, timeAcquiringMicros: { r: 1622, w: 124117, W: 154203 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 2, w: 13, W: 2 }, timeAcquiringMicros: { r: 1030, w: 99381, W: 369 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 500ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.639-0400 m31100| 2015-07-09T14:15:55.636-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465754_94
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.640-0400 m31200| 2015-07-09T14:15:55.640-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465754_94
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.643-0400 m31101| 2015-07-09T14:15:55.643-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465754_94
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.644-0400 m31102| 2015-07-09T14:15:55.643-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465754_94
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.644-0400 m31202| 2015-07-09T14:15:55.644-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465754_94
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.645-0400 m31201| 2015-07-09T14:15:55.645-0400 I COMMAND [repl writer worker 15] CMD: drop db61.tmp.mrs.coll61_1436465754_94
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.648-0400 m31100| 2015-07-09T14:15:55.647-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.649-0400 m31200| 2015-07-09T14:15:55.649-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_222
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.654-0400 m31100| 2015-07-09T14:15:55.653-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.654-0400 m31100| 2015-07-09T14:15:55.653-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.656-0400 m31100| 2015-07-09T14:15:55.656-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.657-0400 m31100| 2015-07-09T14:15:55.657-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.660-0400 m31100| 2015-07-09T14:15:55.659-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465755_95 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.661-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.661-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.661-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.662-0400 m31100| values...., out: "tmp.mrs.coll61_1436465755_95", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 35, w: 43, W: 1 }, timeAcquiringMicros: { r: 108141, w: 206261, W: 444 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 10, R: 6, W: 3 }, timeAcquiringMicros: { r: 1076, w: 49368, R: 5923, W: 837 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 530ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.662-0400 m31100| 2015-07-09T14:15:55.661-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.777-0400 m31100| 2015-07-09T14:15:55.776-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.784-0400 m31200| 2015-07-09T14:15:55.784-0400 I COMMAND [conn29] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.789-0400 m31200| 2015-07-09T14:15:55.789-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_222
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.791-0400 m31200| 2015-07-09T14:15:55.791-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_222
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.793-0400 m31200| 2015-07-09T14:15:55.793-0400 I COMMAND [conn29] CMD: drop db61.tmp.mr.coll61_222
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.794-0400 m31200| 2015-07-09T14:15:55.793-0400 I COMMAND [conn29] command db61.tmp.mrs.coll61_1436465755_97 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.794-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.794-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.794-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.795-0400 m31200| values...., out: "tmp.mrs.coll61_1436465755_97", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 144ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.796-0400 m31100| 2015-07-09T14:15:55.796-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.797-0400 m31101| 2015-07-09T14:15:55.797-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mr.coll61_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.798-0400 m31102| 2015-07-09T14:15:55.798-0400 I COMMAND [repl writer worker 4] CMD: drop db61.tmp.mr.coll61_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.802-0400 m31100| 2015-07-09T14:15:55.801-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.802-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.802-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.802-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.803-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.803-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.805-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465754_78", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465754_78", timeMillis: 589, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|177, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465754_78", timeMillis: 151, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465754000|155, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 1815, w: 56162, W: 134526 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 7, W: 3 }, timeAcquiringMicros: { w: 38078, W: 18717 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 371ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.805-0400 m31100| 2015-07-09T14:15:55.803-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465754_78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.811-0400 m31100| 2015-07-09T14:15:55.811-0400 I COMMAND [conn191] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.817-0400 m31100| 2015-07-09T14:15:55.817-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.817-0400 m31100| 2015-07-09T14:15:55.817-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.819-0400 m31200| 2015-07-09T14:15:55.818-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465754_78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.822-0400 m31201| 2015-07-09T14:15:55.822-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465754_78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.823-0400 m31100| 2015-07-09T14:15:55.823-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.825-0400 m31202| 2015-07-09T14:15:55.824-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465754_78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.827-0400 m31102| 2015-07-09T14:15:55.826-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465754_78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.828-0400 m31200| 2015-07-09T14:15:55.827-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_223
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.829-0400 m31100| 2015-07-09T14:15:55.828-0400 I COMMAND [conn191] command db61.tmp.mrs.coll61_1436465755_79 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.829-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.829-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.829-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.830-0400 m31100| values...., out: "tmp.mrs.coll61_1436465755_79", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 31, w: 45, W: 1 }, timeAcquiringMicros: { r: 80153, w: 160503, W: 34891 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 4, w: 11, R: 7, W: 7 }, timeAcquiringMicros: { r: 24715, w: 99062, R: 14952, W: 14668 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 586ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.830-0400 m31100| 2015-07-09T14:15:55.829-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_309
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.830-0400 m31101| 2015-07-09T14:15:55.830-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465754_78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.831-0400 m31100| 2015-07-09T14:15:55.830-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.967-0400 m31100| 2015-07-09T14:15:55.966-0400 I COMMAND [conn186] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.971-0400 m31100| 2015-07-09T14:15:55.971-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.972-0400 m31100| 2015-07-09T14:15:55.971-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.975-0400 m31200| 2015-07-09T14:15:55.975-0400 I COMMAND [conn30] CMD: drop db61.tmp.mrs.coll61_1436465755_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.980-0400 m31200| 2015-07-09T14:15:55.979-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_223
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.980-0400 m31200| 2015-07-09T14:15:55.979-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_223
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.981-0400 m31200| 2015-07-09T14:15:55.981-0400 I COMMAND [conn30] CMD: drop db61.tmp.mr.coll61_223
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.982-0400 m31200| 2015-07-09T14:15:55.981-0400 I COMMAND [conn30] command db61.tmp.mrs.coll61_1436465755_80 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.982-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.982-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.984-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.985-0400 m31200| values...., out: "tmp.mrs.coll61_1436465755_80", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 31 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 154ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.986-0400 m31100| 2015-07-09T14:15:55.986-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.994-0400 m31100| 2015-07-09T14:15:55.993-0400 I COMMAND [conn186] command db61.tmp.mrs.coll61_1436465755_96 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.994-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.994-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.994-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.995-0400 m31100| values...., out: "tmp.mrs.coll61_1436465755_96", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 23, w: 38, W: 1 }, timeAcquiringMicros: { r: 57605, w: 174654, W: 3666 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 7, w: 19, R: 9, W: 7 }, timeAcquiringMicros: { r: 54998, w: 81536, R: 7143, W: 8284 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 564ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:55.995-0400 m31100| 2015-07-09T14:15:55.994-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_310
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.038-0400 m31100| 2015-07-09T14:15:56.037-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.042-0400 m31100| 2015-07-09T14:15:56.042-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.043-0400 m31100| 2015-07-09T14:15:56.042-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.043-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.043-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.043-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.043-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.043-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.044-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465755_95", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465755_95", timeMillis: 524, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|265, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465755_95", timeMillis: 181, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|25, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 15, W: 20 }, timeAcquiringMicros: { r: 2391, w: 63202, W: 89306 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 16, W: 2 }, timeAcquiringMicros: { w: 125244, W: 686 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 381ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.044-0400 m31100| 2015-07-09T14:15:56.043-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.045-0400 m31102| 2015-07-09T14:15:56.044-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mr.coll61_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.045-0400 m31101| 2015-07-09T14:15:56.045-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mr.coll61_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.046-0400 m31200| 2015-07-09T14:15:56.046-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.050-0400 m31202| 2015-07-09T14:15:56.049-0400 I COMMAND [repl writer worker 14] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.050-0400 m31201| 2015-07-09T14:15:56.049-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.051-0400 m31101| 2015-07-09T14:15:56.050-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.051-0400 m31100| 2015-07-09T14:15:56.051-0400 I COMMAND [conn49] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.052-0400 m31200| 2015-07-09T14:15:56.052-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_224
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.053-0400 m31102| 2015-07-09T14:15:56.053-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465755_95
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.062-0400 m31100| 2015-07-09T14:15:56.061-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.062-0400 m31100| 2015-07-09T14:15:56.062-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.065-0400 m31100| 2015-07-09T14:15:56.065-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_311
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.066-0400 m31100| 2015-07-09T14:15:56.065-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.088-0400 m31100| 2015-07-09T14:15:56.088-0400 I COMMAND [conn49] command db61.tmp.mrs.coll61_1436465755_97 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.089-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.090-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.090-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.090-0400 m31100| values...., out: "tmp.mrs.coll61_1436465755_97", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 29, w: 24, W: 1 }, timeAcquiringMicros: { r: 96193, w: 86986, W: 2190 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 17, R: 8, W: 4 }, timeAcquiringMicros: { r: 2329, w: 88689, R: 11095, W: 1259 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 439ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.092-0400 m31100| 2015-07-09T14:15:56.090-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_312
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.199-0400 m31100| 2015-07-09T14:15:56.199-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_309
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.385-0400 m31100| 2015-07-09T14:15:56.210-0400 I COMMAND [conn191] CMD: drop db61.tmp.mr.coll61_309
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.385-0400 m31101| 2015-07-09T14:15:56.212-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mr.coll61_309
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.385-0400 m31100| 2015-07-09T14:15:56.215-0400 I COMMAND [conn191] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.386-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.386-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.386-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.386-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.386-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.387-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465755_79", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465755_79", timeMillis: 575, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|304, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465755_79", timeMillis: 150, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|46, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 4, w: 17, W: 20 }, timeAcquiringMicros: { r: 7316, w: 81924, W: 132432 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 12, W: 4 }, timeAcquiringMicros: { w: 49992, W: 3239 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 386ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.387-0400 m31100| 2015-07-09T14:15:56.216-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.387-0400 m31200| 2015-07-09T14:15:56.219-0400 I COMMAND [conn79] CMD: drop db61.tmp.mrs.coll61_1436465756_98
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.387-0400 m31102| 2015-07-09T14:15:56.219-0400 I COMMAND [repl writer worker 3] CMD: drop db61.tmp.mr.coll61_309
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.388-0400 m31200| 2015-07-09T14:15:56.224-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.388-0400 m31200| 2015-07-09T14:15:56.228-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_224
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.388-0400 m31200| 2015-07-09T14:15:56.228-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_224
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.388-0400 m31102| 2015-07-09T14:15:56.230-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.388-0400 m31101| 2015-07-09T14:15:56.230-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.388-0400 m31200| 2015-07-09T14:15:56.236-0400 I COMMAND [conn79] CMD: drop db61.tmp.mr.coll61_224
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.388-0400 m31202| 2015-07-09T14:15:56.251-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.389-0400 m31200| 2015-07-09T14:15:56.254-0400 I COMMAND [conn79] command db61.tmp.mrs.coll61_1436465756_98 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.389-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.389-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.389-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.390-0400 m31200| values...., out: "tmp.mrs.coll61_1436465756_98", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:212 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 16193 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 202ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.391-0400 m30999| 2015-07-09T14:15:56.255-0400 I NETWORK [conn395] end connection 127.0.0.1:63897 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.391-0400 m31201| 2015-07-09T14:15:56.259-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mrs.coll61_1436465755_79
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.391-0400 m31100| 2015-07-09T14:15:56.283-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_310
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.392-0400 m31100| 2015-07-09T14:15:56.285-0400 I COMMAND [conn186] CMD: drop db61.tmp.mr.coll61_310
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.392-0400 m31100| 2015-07-09T14:15:56.285-0400 I COMMAND [conn186] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.392-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.392-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.392-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.392-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.392-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.393-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465755_96", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465755_96", timeMillis: 542, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|363, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465755_96", timeMillis: 120, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|69, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 23, W: 20 }, timeAcquiringMicros: { r: 12468, w: 104216, W: 63876 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 5, W: 3 }, timeAcquiringMicros: { w: 15471, W: 963 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 290ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.393-0400 m31100| 2015-07-09T14:15:56.286-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.393-0400 m31101| 2015-07-09T14:15:56.288-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mr.coll61_310
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m31102| 2015-07-09T14:15:56.289-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mr.coll61_310
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m31200| 2015-07-09T14:15:56.289-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m31201| 2015-07-09T14:15:56.293-0400 I COMMAND [repl writer worker 12] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m31202| 2015-07-09T14:15:56.294-0400 I COMMAND [repl writer worker 10] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m31101| 2015-07-09T14:15:56.293-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m31102| 2015-07-09T14:15:56.294-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465755_96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m31100| 2015-07-09T14:15:56.296-0400 I COMMAND [conn45] CMD: drop db61.tmp.mrs.coll61_1436465755_80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.394-0400 m30998| 2015-07-09T14:15:56.297-0400 I NETWORK [conn394] end connection 127.0.0.1:63899 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.395-0400 m31100| 2015-07-09T14:15:56.300-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.395-0400 m31100| 2015-07-09T14:15:56.301-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.395-0400 m31100| 2015-07-09T14:15:56.306-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.395-0400 m31100| 2015-07-09T14:15:56.311-0400 I COMMAND [conn45] command db61.tmp.mrs.coll61_1436465755_80 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.395-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.395-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.395-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.396-0400 m31100| values...., out: "tmp.mrs.coll61_1436465755_80", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 28, w: 44, W: 1 }, timeAcquiringMicros: { r: 121559, w: 113814, W: 1949 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 9, R: 6, W: 6 }, timeAcquiringMicros: { r: 22770, w: 31263, R: 6701, W: 22182 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 484ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| 2015-07-09T14:15:56.311-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_313
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| 2015-07-09T14:15:56.359-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_312
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| 2015-07-09T14:15:56.361-0400 I COMMAND [conn49] CMD: drop db61.tmp.mr.coll61_312
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| 2015-07-09T14:15:56.361-0400 I COMMAND [conn49] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.397-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.399-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465755_97", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465755_97", timeMillis: 413, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465756000|34, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465755_97", timeMillis: 140, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|92, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { w: 23, W: 20 }, timeAcquiringMicros: { w: 104722, W: 34388 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { r: 1, w: 4, W: 4 }, timeAcquiringMicros: { r: 784, w: 26951, W: 1887 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 271ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.399-0400 m31100| 2015-07-09T14:15:56.362-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.399-0400 m31101| 2015-07-09T14:15:56.362-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mr.coll61_312
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.400-0400 m31102| 2015-07-09T14:15:56.362-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_312
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.400-0400 m31200| 2015-07-09T14:15:56.363-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.400-0400 m31201| 2015-07-09T14:15:56.367-0400 I COMMAND [repl writer worker 2] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.400-0400 m31202| 2015-07-09T14:15:56.367-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.400-0400 m31100| 2015-07-09T14:15:56.368-0400 I COMMAND [conn178] CMD: drop db61.tmp.mrs.coll61_1436465756_98
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.400-0400 m31101| 2015-07-09T14:15:56.369-0400 I COMMAND [repl writer worker 13] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m31102| 2015-07-09T14:15:56.370-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465755_97
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m30998| 2015-07-09T14:15:56.374-0400 I NETWORK [conn396] end connection 127.0.0.1:63901 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m31100| 2015-07-09T14:15:56.378-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_311
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m31100| 2015-07-09T14:15:56.378-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_311
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m31100| 2015-07-09T14:15:56.380-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_311
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m31100| 2015-07-09T14:15:56.384-0400 I COMMAND [conn178] command db61.tmp.mrs.coll61_1436465756_98 command: mapReduce { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.401-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.402-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.402-0400 m31100| values...., out: "tmp.mrs.coll61_1436465756_98", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:212 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 22, w: 33, W: 1 }, timeAcquiringMicros: { r: 68836, w: 67009, W: 92 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 5, R: 7, W: 4 }, timeAcquiringMicros: { r: 3731, w: 3548, R: 16208, W: 4904 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 332ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.402-0400 m31100| 2015-07-09T14:15:56.385-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_314
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.459-0400 m31100| 2015-07-09T14:15:56.458-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_313
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.462-0400 m31100| 2015-07-09T14:15:56.462-0400 I COMMAND [conn45] CMD: drop db61.tmp.mr.coll61_313
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.464-0400 m31100| 2015-07-09T14:15:56.464-0400 I COMMAND [conn45] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.465-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.465-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.465-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.465-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.465-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.467-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465755_80", shards:
{ test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465755_80", timeMillis: 474, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465756000|128, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465755_80", timeMillis: 153, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465755000|115, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 1, w: 13, W: 19 }, timeAcquiringMicros: { r: 1256, w: 31444, W: 13818 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { w: 5, W: 1 }, timeAcquiringMicros: { w: 19274, W: 1005 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.467-0400 m31100| 2015-07-09T14:15:56.464-0400 I COMMAND [conn38] CMD: drop db61.tmp.mrs.coll61_1436465755_80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.467-0400 m31102| 2015-07-09T14:15:56.464-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mr.coll61_313 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.467-0400 m31101| 2015-07-09T14:15:56.465-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mr.coll61_313 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.469-0400 m31200| 2015-07-09T14:15:56.469-0400 I COMMAND [conn63] CMD: drop db61.tmp.mrs.coll61_1436465755_80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.471-0400 m31202| 2015-07-09T14:15:56.470-0400 I COMMAND [repl writer worker 0] CMD: drop db61.tmp.mrs.coll61_1436465755_80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.474-0400 m31102| 2015-07-09T14:15:56.473-0400 I COMMAND [repl writer worker 5] CMD: drop db61.tmp.mrs.coll61_1436465755_80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.474-0400 m31201| 2015-07-09T14:15:56.473-0400 I COMMAND [repl writer worker 11] CMD: drop db61.tmp.mrs.coll61_1436465755_80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.477-0400 m30999| 2015-07-09T14:15:56.476-0400 I NETWORK [conn396] end connection 127.0.0.1:63898 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.477-0400 m31101| 2015-07-09T14:15:56.476-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465755_80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.504-0400 m31100| 2015-07-09T14:15:56.503-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_314 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.505-0400 m31100| 2015-07-09T14:15:56.505-0400 I COMMAND [conn178] CMD: drop db61.tmp.mr.coll61_314 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.507-0400 m31102| 2015-07-09T14:15:56.507-0400 I COMMAND [repl writer worker 6] CMD: drop db61.tmp.mr.coll61_314 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:56.507-0400 m31101| 2015-07-09T14:15:56.507-0400 I COMMAND [repl writer worker 8] CMD: drop db61.tmp.mr.coll61_314 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.516-0400 m31100| 2015-07-09T14:15:56.515-0400 I COMMAND [conn178] command db61.map_reduce_reduce_nonatomic command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll61", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.516-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.516-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.517-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.517-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.517-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.518-0400 m31100| }, out: { reduce: "map_reduce_reduce_nonatomic", nonAtomic: true } }, inputDB: "db61", shardedOutputCollection: "tmp.mrs.coll61_1436465756_98", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll61_1436465756_98", timeMillis: 326, counts: { input: 983, emit: 983, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465756000|180, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll61_1436465756_98", timeMillis: 176, counts: { input: 1017, emit: 1017, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465756000|23, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 983, emit: 983, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1017, emit: 1017, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:239 locks:{ Global: { acquireCount: { r: 95, w: 67, W: 20 }, acquireWaitCount: { r: 3, w: 16, W: 13 }, timeAcquiringMicros: { r: 5485, w: 24425, W: 6797 } }, Database: { acquireCount: { r: 4, w: 64, W: 4 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2162 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 42 } }, oplog: { acquireCount: { w: 42 } } } protocol:op_command 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.518-0400 m31100| 2015-07-09T14:15:56.516-0400 I COMMAND [conn39] CMD: drop db61.tmp.mrs.coll61_1436465756_98 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.518-0400 m31200| 2015-07-09T14:15:56.518-0400 I COMMAND [conn34] CMD: drop db61.tmp.mrs.coll61_1436465756_98 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.520-0400 m31102| 2015-07-09T14:15:56.520-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465756_98 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.520-0400 m31101| 2015-07-09T14:15:56.520-0400 I COMMAND [repl writer worker 1] CMD: drop db61.tmp.mrs.coll61_1436465756_98 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.522-0400 m31202| 2015-07-09T14:15:56.522-0400 I COMMAND [repl writer worker 7] CMD: drop db61.tmp.mrs.coll61_1436465756_98 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:56.522-0400 m31201| 2015-07-09T14:15:56.522-0400 I COMMAND [repl writer worker 9] CMD: drop db61.tmp.mrs.coll61_1436465756_98 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.525-0400 m30998| 2015-07-09T14:15:56.525-0400 I NETWORK [conn395] end connection 127.0.0.1:63900 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.543-0400 m30999| 2015-07-09T14:15:56.543-0400 I COMMAND [conn1] DROP: db61.map_reduce_reduce_nonatomic [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.543-0400 m30999| 2015-07-09T14:15:56.543-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.543-0400 m31100| 2015-07-09T14:15:56.543-0400 I COMMAND [conn45] CMD: drop db61.map_reduce_reduce_nonatomic [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.546-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.546-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.546-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.546-0400 jstests/concurrency/fsm_workloads/map_reduce_reduce_nonatomic.js: Workload completed in 9515 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.546-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.546-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.547-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.547-0400 m30999| 2015-07-09T14:15:56.546-0400 I COMMAND [conn1] DROP: db61.coll61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.547-0400 m30999| 2015-07-09T14:15:56.546-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:56.546-0400-559eba5cca4787b9985d1e7a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465756546), what: "dropCollection.start", ns: "db61.coll61", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.547-0400 m31101| 2015-07-09T14:15:56.547-0400 I COMMAND [repl writer worker 9] CMD: drop db61.map_reduce_reduce_nonatomic [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.548-0400 m31102| 2015-07-09T14:15:56.547-0400 I COMMAND [repl writer worker 15] CMD: drop db61.map_reduce_reduce_nonatomic [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.604-0400 m30999| 2015-07-09T14:15:56.604-0400 I SHARDING [conn1] distributed lock 'db61.coll61/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba5cca4787b9985d1e7b [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.605-0400 m31100| 2015-07-09T14:15:56.605-0400 I COMMAND [conn38] CMD: drop db61.coll61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.609-0400 m31200| 2015-07-09T14:15:56.608-0400 I COMMAND [conn63] CMD: drop db61.coll61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.610-0400 m31102| 2015-07-09T14:15:56.609-0400 I COMMAND [repl writer worker 10] CMD: drop db61.coll61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.610-0400 m31101| 2015-07-09T14:15:56.610-0400 I COMMAND [repl writer worker 13] CMD: drop db61.coll61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.613-0400 m31202| 2015-07-09T14:15:56.612-0400 I COMMAND [repl writer worker 12] CMD: drop db61.coll61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.618-0400 m31201| 2015-07-09T14:15:56.617-0400 I COMMAND [repl writer worker 10] CMD: drop db61.coll61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.667-0400 m31100| 2015-07-09T14:15:56.666-0400 I SHARDING [conn38] remotely refreshing 
metadata for db61.coll61 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba51ca4787b9985d1e78, current metadata version is 2|3||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.669-0400 m31100| 2015-07-09T14:15:56.668-0400 W SHARDING [conn38] no chunks found when reloading db61.coll61, previous version was 0|0||559eba51ca4787b9985d1e78, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.669-0400 m31100| 2015-07-09T14:15:56.668-0400 I SHARDING [conn38] dropping metadata for db61.coll61 at shard version 2|3||559eba51ca4787b9985d1e78, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.670-0400 m31200| 2015-07-09T14:15:56.670-0400 I SHARDING [conn63] remotely refreshing metadata for db61.coll61 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba51ca4787b9985d1e78, current metadata version is 2|5||559eba51ca4787b9985d1e78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.672-0400 m31200| 2015-07-09T14:15:56.671-0400 W SHARDING [conn63] no chunks found when reloading db61.coll61, previous version was 0|0||559eba51ca4787b9985d1e78, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.672-0400 m31200| 2015-07-09T14:15:56.672-0400 I SHARDING [conn63] dropping metadata for db61.coll61 at shard version 2|5||559eba51ca4787b9985d1e78, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.673-0400 m30999| 2015-07-09T14:15:56.672-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:56.672-0400-559eba5cca4787b9985d1e7c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465756672), what: "dropCollection", ns: "db61.coll61", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.728-0400 m30999| 2015-07-09T14:15:56.728-0400 I SHARDING [conn1] distributed lock 'db61.coll61/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.784-0400 m30999| 2015-07-09T14:15:56.784-0400 I COMMAND [conn1] DROP DATABASE: db61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.784-0400 m30999| 2015-07-09T14:15:56.784-0400 I SHARDING [conn1] DBConfig::dropDatabase: db61 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.785-0400 m30999| 2015-07-09T14:15:56.784-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:56.784-0400-559eba5cca4787b9985d1e7d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465756784), what: "dropDatabase.start", ns: "db61", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.891-0400 m30999| 2015-07-09T14:15:56.890-0400 I SHARDING [conn1] DBConfig::dropDatabase: db61 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.891-0400 m31100| 2015-07-09T14:15:56.891-0400 I COMMAND [conn157] dropDatabase db61 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.891-0400 m31100| 2015-07-09T14:15:56.891-0400 I COMMAND [conn157] dropDatabase db61 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.893-0400 m30999| 2015-07-09T14:15:56.892-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:56.892-0400-559eba5cca4787b9985d1e7e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465756892), what: "dropDatabase", ns: "db61", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.893-0400 m31102| 2015-07-09T14:15:56.892-0400 I COMMAND [repl writer worker 0] dropDatabase db61 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.893-0400 m31102| 2015-07-09T14:15:56.893-0400 I COMMAND [repl writer worker 0] dropDatabase db61 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.893-0400 m31101| 2015-07-09T14:15:56.893-0400 I COMMAND [repl writer worker 14] dropDatabase db61 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.894-0400 m31101| 2015-07-09T14:15:56.893-0400 I COMMAND [repl writer worker 14] dropDatabase db61 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.976-0400 m31100| 2015-07-09T14:15:56.976-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.980-0400 m31101| 2015-07-09T14:15:56.979-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:56.980-0400 m31102| 2015-07-09T14:15:56.979-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.016-0400 m31200| 2015-07-09T14:15:57.016-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.019-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.019-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.019-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.019-0400 jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.020-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.020-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.020-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.020-0400 m31201| 2015-07-09T14:15:57.019-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:57.020-0400 m31202| 2015-07-09T14:15:57.019-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.026-0400 m30999| 2015-07-09T14:15:57.026-0400 I SHARDING [conn1] distributed lock 'db62/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba5dca4787b9985d1e7f [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.031-0400 m30999| 2015-07-09T14:15:57.030-0400 I SHARDING [conn1] Placing [db62] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.031-0400 m30999| 2015-07-09T14:15:57.030-0400 I SHARDING [conn1] Enabling sharding for database [db62] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.083-0400 m30999| 2015-07-09T14:15:57.083-0400 I SHARDING [conn1] distributed lock 'db62/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.101-0400 m31100| 2015-07-09T14:15:57.101-0400 I INDEX [conn144] build index on: db62.coll62 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.101-0400 m31100| 2015-07-09T14:15:57.101-0400 I INDEX [conn144] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.110-0400 m31100| 2015-07-09T14:15:57.109-0400 I INDEX [conn144] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.111-0400 m30999| 2015-07-09T14:15:57.110-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db62.coll62", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.114-0400 m30999| 2015-07-09T14:15:57.113-0400 I SHARDING [conn1] distributed lock 'db62.coll62/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba5dca4787b9985d1e80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.115-0400 m30999| 2015-07-09T14:15:57.114-0400 I SHARDING [conn1] enable sharding on: db62.coll62 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.116-0400 m30999| 2015-07-09T14:15:57.114-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.114-0400-559eba5dca4787b9985d1e81", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465757114), what: "shardCollection.start", ns: "db62.coll62", details: { shardKey: { _id: "hashed" }, collection: "db62.coll62", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.122-0400 m31101| 2015-07-09T14:15:57.120-0400 I INDEX [repl writer worker 12] build index on: db62.coll62 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.122-0400 m31101| 2015-07-09T14:15:57.121-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.127-0400 m31101| 2015-07-09T14:15:57.126-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.129-0400 m31102| 2015-07-09T14:15:57.129-0400 I INDEX [repl writer worker 13] build index on: db62.coll62 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.129-0400 m31102| 2015-07-09T14:15:57.129-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.134-0400 m31102| 2015-07-09T14:15:57.134-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.167-0400 m30999| 2015-07-09T14:15:57.167-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db62.coll62 using new epoch 559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.274-0400 m30999| 2015-07-09T14:15:57.273-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db62.coll62: 0ms sequenceNumber: 273 version: 1|1||559eba5dca4787b9985d1e82 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.329-0400 m30999| 2015-07-09T14:15:57.328-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db62.coll62: 0ms sequenceNumber: 274 version: 1|1||559eba5dca4787b9985d1e82 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.331-0400 m31100| 2015-07-09T14:15:57.330-0400 I SHARDING [conn45] remotely refreshing metadata for db62.coll62 with requested shard version 1|1||559eba5dca4787b9985d1e82, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.332-0400 m31100| 2015-07-09T14:15:57.332-0400 I SHARDING [conn45] collection db62.coll62 was previously unsharded, new metadata loaded with shard version 1|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.333-0400 m31100| 2015-07-09T14:15:57.332-0400 I SHARDING [conn45] collection version was loaded at version 1|1||559eba5dca4787b9985d1e82, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.333-0400 m30999| 2015-07-09T14:15:57.333-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.333-0400-559eba5dca4787b9985d1e83", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465757333), what: "shardCollection", ns: "db62.coll62", details: { version: "1|1||559eba5dca4787b9985d1e82" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.388-0400 m30999| 2015-07-09T14:15:57.388-0400 I SHARDING [conn1] distributed lock 'db62.coll62/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.389-0400 m30999| 2015-07-09T14:15:57.388-0400 I SHARDING [conn1] moving chunk ns: db62.coll62 moving ( ns: db62.coll62, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.389-0400 m31100| 2015-07-09T14:15:57.389-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.390-0400 m31100| 2015-07-09T14:15:57.390-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba5dca4787b9985d1e82') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.394-0400 m31100| 2015-07-09T14:15:57.394-0400 I SHARDING [conn38] distributed lock 'db62.coll62/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba5d792e00bb67274a70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.394-0400 m31100| 2015-07-09T14:15:57.394-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.394-0400-559eba5d792e00bb67274a71", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465757394), what: "moveChunk.start", ns: "db62.coll62", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.447-0400 m31100| 2015-07-09T14:15:57.446-0400 I SHARDING [conn38] remotely refreshing metadata for db62.coll62 based on current shard version 1|1||559eba5dca4787b9985d1e82, current metadata version is 1|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.448-0400 m31100| 2015-07-09T14:15:57.448-0400 I SHARDING [conn38] metadata of collection db62.coll62 already up to date (shard version : 1|1||559eba5dca4787b9985d1e82, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.448-0400 m31100| 2015-07-09T14:15:57.448-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.449-0400 m31100| 2015-07-09T14:15:57.448-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.449-0400 m31200| 2015-07-09T14:15:57.449-0400 I SHARDING [conn16] remotely refreshing metadata for db62.coll62, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.451-0400 m31200| 2015-07-09T14:15:57.450-0400 I SHARDING [conn16] collection db62.coll62 was previously unsharded, new metadata loaded with shard version 0|0||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.451-0400 m31200| 2015-07-09T14:15:57.451-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba5dca4787b9985d1e82, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.451-0400 m31200| 2015-07-09T14:15:57.451-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db62.coll62 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.454-0400 m31100| 2015-07-09T14:15:57.453-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.457-0400 m31100| 2015-07-09T14:15:57.456-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.462-0400 m31100| 2015-07-09T14:15:57.461-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.469-0400 m31200| 2015-07-09T14:15:57.468-0400 I INDEX [migrateThread] build index on: db62.coll62 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.469-0400 m31200| 2015-07-09T14:15:57.469-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.471-0400 m31100| 2015-07-09T14:15:57.471-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.479-0400 m31200| 2015-07-09T14:15:57.479-0400 I INDEX [migrateThread] build index on: db62.coll62 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.479-0400 m31200| 2015-07-09T14:15:57.479-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.489-0400 m31100| 2015-07-09T14:15:57.488-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.491-0400 m31200| 2015-07-09T14:15:57.491-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.493-0400 m31200| 2015-07-09T14:15:57.492-0400 I SHARDING [migrateThread] Deleter starting delete for: db62.coll62 from { _id: 0 } -> { _id: MaxKey }, with opId: 93841 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.493-0400 m31200| 2015-07-09T14:15:57.493-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db62.coll62 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.502-0400 m31202| 2015-07-09T14:15:57.502-0400 I INDEX [repl writer worker 13] build index on: db62.coll62 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.502-0400 m31201| 2015-07-09T14:15:57.502-0400 I INDEX [repl writer worker 1] build index on: db62.coll62 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.503-0400 m31202| 2015-07-09T14:15:57.502-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.503-0400 m31201| 2015-07-09T14:15:57.502-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.507-0400 m31201| 2015-07-09T14:15:57.507-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.510-0400 m31200| 2015-07-09T14:15:57.509-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.510-0400 m31202| 2015-07-09T14:15:57.509-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.510-0400 m31200| 2015-07-09T14:15:57.509-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db62.coll62' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.522-0400 m31100| 2015-07-09T14:15:57.522-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.522-0400 m31100| 2015-07-09T14:15:57.522-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.523-0400 m31100| 2015-07-09T14:15:57.523-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.523-0400 m31100| 2015-07-09T14:15:57.523-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.532-0400 m31200| 2015-07-09T14:15:57.532-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db62.coll62' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.533-0400 m31200| 2015-07-09T14:15:57.532-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.532-0400-559eba5dd5a107a5b9c0db67", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465757532), what: "moveChunk.to", ns: "db62.coll62", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 41, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 1, step 5 of 5: 22, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.586-0400 m31100| 2015-07-09T14:15:57.586-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.587-0400 m31100| 2015-07-09T14:15:57.586-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559eba5dca4787b9985d1e82 through { _id: MinKey } -> { _id: 0 } for collection 'db62.coll62' [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.588-0400 m31100| 2015-07-09T14:15:57.587-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.587-0400-559eba5d792e00bb67274a72", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465757587), what: "moveChunk.commit", ns: "db62.coll62", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.641-0400 m31100| 2015-07-09T14:15:57.641-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.641-0400 m31100| 2015-07-09T14:15:57.641-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:57.642-0400 m31100| 2015-07-09T14:15:57.641-0400 I SHARDING [conn38] Deleter starting delete for: db62.coll62 from { _id: 0 } -> { _id: MaxKey }, with opId: 191237 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.642-0400 m31100| 2015-07-09T14:15:57.641-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db62.coll62 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.642-0400 m31100| 2015-07-09T14:15:57.641-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.643-0400 m31100| 2015-07-09T14:15:57.643-0400 I SHARDING [conn38] distributed lock 'db62.coll62/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.643-0400 m31100| 2015-07-09T14:15:57.643-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.643-0400-559eba5d792e00bb67274a73", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465757643), what: "moveChunk.from", ns: "db62.coll62", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 3, step 4 of 6: 70, step 5 of 6: 119, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.697-0400 m31100| 2015-07-09T14:15:57.696-0400 I COMMAND [conn38] command db62.coll62 command: moveChunk { moveChunk: "db62.coll62", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba5dca4787b9985d1e82') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 306ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.698-0400 m30999| 2015-07-09T14:15:57.697-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db62.coll62: 0ms sequenceNumber: 275 version: 2|1||559eba5dca4787b9985d1e82 based on: 1|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.699-0400 m31100| 2015-07-09T14:15:57.699-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db62.coll62", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5dca4787b9985d1e82') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.702-0400 m31100| 2015-07-09T14:15:57.702-0400 I SHARDING [conn38] distributed lock 'db62.coll62/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba5d792e00bb67274a74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.703-0400 m31100| 2015-07-09T14:15:57.702-0400 I SHARDING [conn38] remotely refreshing metadata for db62.coll62 based on current shard version 2|0||559eba5dca4787b9985d1e82, current metadata version is 2|0||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.704-0400 m31100| 2015-07-09T14:15:57.703-0400 I SHARDING [conn38] updating metadata 
for db62.coll62 from shard version 2|0||559eba5dca4787b9985d1e82 to shard version 2|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.704-0400 m31100| 2015-07-09T14:15:57.703-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559eba5dca4787b9985d1e82, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.704-0400 m31100| 2015-07-09T14:15:57.704-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.707-0400 m31100| 2015-07-09T14:15:57.706-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.706-0400-559eba5d792e00bb67274a75", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465757706), what: "split", ns: "db62.coll62", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba5dca4787b9985d1e82') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba5dca4787b9985d1e82') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.762-0400 m31100| 2015-07-09T14:15:57.761-0400 I SHARDING [conn38] distributed lock 'db62.coll62/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.765-0400 m30999| 2015-07-09T14:15:57.764-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db62.coll62: 0ms sequenceNumber: 276 version: 2|3||559eba5dca4787b9985d1e82 based on: 2|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.766-0400 m31200| 2015-07-09T14:15:57.765-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db62.coll62", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5dca4787b9985d1e82') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.769-0400 m31200| 2015-07-09T14:15:57.769-0400 I SHARDING [conn63] distributed lock 'db62.coll62/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba5dd5a107a5b9c0db68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.770-0400 m31200| 2015-07-09T14:15:57.769-0400 I SHARDING [conn63] remotely refreshing metadata for db62.coll62 based on current shard version 0|0||559eba5dca4787b9985d1e82, current metadata version is 1|1||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.771-0400 m31200| 2015-07-09T14:15:57.771-0400 I SHARDING [conn63] updating metadata for db62.coll62 from shard version 0|0||559eba5dca4787b9985d1e82 to shard version 2|0||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.771-0400 m31200| 2015-07-09T14:15:57.771-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eba5dca4787b9985d1e82, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.771-0400 m31200| 2015-07-09T14:15:57.771-0400 I SHARDING [conn63] splitChunk accepted at version 2|0||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.774-0400 m31200| 2015-07-09T14:15:57.773-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:57.773-0400-559eba5dd5a107a5b9c0db69", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new 
Date(1436465757773), what: "split", ns: "db62.coll62", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba5dca4787b9985d1e82') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba5dca4787b9985d1e82') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.828-0400 m31200| 2015-07-09T14:15:57.828-0400 I SHARDING [conn63] distributed lock 'db62.coll62/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.830-0400 m30999| 2015-07-09T14:15:57.830-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db62.coll62: 0ms sequenceNumber: 277 version: 2|5||559eba5dca4787b9985d1e82 based on: 2|3||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.839-0400 m31200| 2015-07-09T14:15:57.839-0400 I INDEX [conn30] build index on: db62.coll62 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.839-0400 m31100| 2015-07-09T14:15:57.839-0400 I INDEX [conn45] build index on: db62.coll62 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.840-0400 m31200| 2015-07-09T14:15:57.839-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.840-0400 m31100| 2015-07-09T14:15:57.839-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.846-0400 m31200| 2015-07-09T14:15:57.846-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.850-0400 m31100| 2015-07-09T14:15:57.849-0400 I INDEX [conn45] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.852-0400 m31201| 2015-07-09T14:15:57.851-0400 I INDEX [repl writer worker 4] build index on: db62.coll62 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.853-0400 m31201| 2015-07-09T14:15:57.851-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.853-0400 m31200| 2015-07-09T14:15:57.852-0400 I COMMAND [conn63] CMD: dropIndexes db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.853-0400 m31100| 2015-07-09T14:15:57.851-0400 I COMMAND [conn38] CMD: dropIndexes db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.857-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.861-0400 m31202| 2015-07-09T14:15:57.860-0400 I INDEX [repl writer worker 9] build index on: db62.coll62 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.861-0400 m31202| 2015-07-09T14:15:57.860-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.978-0400 m31101| 2015-07-09T14:15:57.922-0400 I INDEX [repl writer worker 10] build index on: db62.coll62 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.978-0400 m31101| 2015-07-09T14:15:57.922-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.978-0400 m31102| 2015-07-09T14:15:57.963-0400 I INDEX [repl writer worker 8] build index on: db62.coll62 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db62.coll62" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.979-0400 m31102| 2015-07-09T14:15:57.963-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:57.993-0400 m30998| 2015-07-09T14:15:57.993-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63910 #397 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.030-0400 m30999| 2015-07-09T14:15:58.029-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63911 #397 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.032-0400 m31102| 2015-07-09T14:15:58.032-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.033-0400 m31202| 2015-07-09T14:15:58.032-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.035-0400 m31101| 2015-07-09T14:15:58.034-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.044-0400 m30998| 2015-07-09T14:15:58.044-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63912 #398 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.055-0400 m31201| 2015-07-09T14:15:58.055-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.090-0400 m31201| 2015-07-09T14:15:58.086-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.103-0400 m30999| 2015-07-09T14:15:58.103-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63913 #398 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.104-0400 m30998| 2015-07-09T14:15:58.103-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63914 #399 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.104-0400 m30998| 2015-07-09T14:15:58.104-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63915 #400 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.112-0400 m30999| 2015-07-09T14:15:58.112-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63916 #399 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.113-0400 m30998| 2015-07-09T14:15:58.112-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63917 #401 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.113-0400 m30999| 2015-07-09T14:15:58.112-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63918 #400 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.115-0400 m30998| 2015-07-09T14:15:58.115-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63922 #402 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.121-0400 m30998| 2015-07-09T14:15:58.120-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63923 #403 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.121-0400 m30998| 2015-07-09T14:15:58.120-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63924 #404 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.121-0400 m30998| 2015-07-09T14:15:58.121-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63925 #405 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.127-0400 m30999| 2015-07-09T14:15:58.122-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63919 #401 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.128-0400 m30999| 2015-07-09T14:15:58.122-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63920 #402 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.129-0400 m30999| 2015-07-09T14:15:58.128-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63921 #403 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.129-0400 m30998| 2015-07-09T14:15:58.128-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63929 #406 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.129-0400 m30999| 2015-07-09T14:15:58.128-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63926 #404 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.130-0400 m30999| 2015-07-09T14:15:58.128-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63927 #405 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.130-0400 m30999| 2015-07-09T14:15:58.128-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63928 #406 (11 connections now open) 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.140-0400 setting random seed: 2778001273982 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.140-0400 setting random seed: 3923928826116 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.142-0400 setting random seed: 7784936726093 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.142-0400 setting random seed: 2734302650205 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.143-0400 setting random seed: 1176460878923 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.144-0400 setting random seed: 2731546908617 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.144-0400 setting random seed: 5626874505542 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.145-0400 setting random seed: 5424663405865 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.145-0400 setting random seed: 9471811237744 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.145-0400 setting random seed: 879858685657 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.148-0400 m30998| 2015-07-09T14:15:58.147-0400 I SHARDING [conn397] ChunkManager: time to load chunks for db62.coll62: 0ms sequenceNumber: 74 version: 2|5||559eba5dca4787b9985d1e82 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.148-0400 setting random seed: 2317904834635 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.153-0400 setting random seed: 9108914500102 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.153-0400 setting random seed: 9867433556355 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.153-0400 setting random seed: 6594470459967 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.155-0400 setting random seed: 1955858892761 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.155-0400 setting random seed: 7026573857292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.156-0400 setting random seed: 1431616377085 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.157-0400 setting random seed: 7998845386318 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.159-0400 m31202| 2015-07-09T14:15:58.158-0400 I COMMAND [repl writer worker 3] CMD: dropIndexes db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.161-0400 m31102| 2015-07-09T14:15:58.161-0400 I COMMAND [repl writer worker 14] CMD: dropIndexes db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.162-0400 m31101| 2015-07-09T14:15:58.162-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.183-0400 setting random seed: 3526074509136 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.189-0400 setting random seed: 2120425943285 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.553-0400 m30999| 2015-07-09T14:15:58.553-0400 I NETWORK [conn397] end connection 127.0.0.1:63911 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.629-0400 m30998| 2015-07-09T14:15:58.628-0400 I NETWORK [conn400] end connection 127.0.0.1:63915 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.646-0400 m30998| 2015-07-09T14:15:58.645-0400 I NETWORK [conn399] end connection 127.0.0.1:63914 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.657-0400 m30999| 2015-07-09T14:15:58.657-0400 I NETWORK [conn398] end connection 127.0.0.1:63913 (9 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:58.658-0400 m30998| 2015-07-09T14:15:58.658-0400 I NETWORK [conn397] end connection 127.0.0.1:63910 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.673-0400 m30998| 2015-07-09T14:15:58.673-0400 I NETWORK [conn402] end connection 127.0.0.1:63922 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.681-0400 m30998| 2015-07-09T14:15:58.681-0400 I NETWORK [conn398] end connection 127.0.0.1:63912 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.725-0400 m30998| 2015-07-09T14:15:58.725-0400 I NETWORK [conn401] end connection 127.0.0.1:63917 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.736-0400 m30998| 2015-07-09T14:15:58.735-0400 I NETWORK [conn403] end connection 127.0.0.1:63923 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.748-0400 m30998| 2015-07-09T14:15:58.748-0400 I NETWORK [conn405] end connection 127.0.0.1:63925 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.775-0400 m30999| 2015-07-09T14:15:58.775-0400 I NETWORK [conn399] end connection 127.0.0.1:63916 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.781-0400 m30999| 2015-07-09T14:15:58.780-0400 I NETWORK [conn400] end connection 127.0.0.1:63918 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.788-0400 m30999| 2015-07-09T14:15:58.788-0400 I NETWORK [conn404] end connection 127.0.0.1:63926 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.802-0400 m30999| 2015-07-09T14:15:58.795-0400 I NETWORK [conn402] end connection 127.0.0.1:63920 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.806-0400 m30999| 2015-07-09T14:15:58.806-0400 I NETWORK [conn405] end connection 127.0.0.1:63927 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.810-0400 m30998| 2015-07-09T14:15:58.810-0400 I NETWORK [conn406] end connection 127.0.0.1:63929 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.811-0400 m30999| 2015-07-09T14:15:58.811-0400 I NETWORK [conn401] end connection 127.0.0.1:63919 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.841-0400 m30999| 2015-07-09T14:15:58.840-0400 I NETWORK [conn403] end connection 127.0.0.1:63921 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.870-0400 m30998| 2015-07-09T14:15:58.869-0400 I NETWORK [conn404] end connection 127.0.0.1:63924 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.887-0400 m30999| 2015-07-09T14:15:58.887-0400 I NETWORK [conn406] end connection 127.0.0.1:63928 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.906-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.907-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.907-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.907-0400 jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js: Workload completed in 1049 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.907-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.907-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.907-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.908-0400 m30999| 2015-07-09T14:15:58.907-0400 I COMMAND [conn1] DROP: db62.coll62 
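
The teardown beginning here is driven from mongos m30999: it drops the sharded collection db62.coll62 and then the db62 database, with the dropCollection/dropDatabase metadata events logged against the config servers in the records that follow. A minimal sketch of the equivalent shell calls, assuming a mongo shell pointed at the mongos on 127.0.0.1:30999 (the address the log shows for m30999):

    // Drop the workload's namespace the way the FSM runner's teardown does.
    // db62/coll62 are the names the test generated for this workload.
    var conn = new Mongo("127.0.0.1:30999");
    var db62 = conn.getDB("db62");
    db62.coll62.drop();    // logged as "DROP: db62.coll62" -> dropCollection.start/dropCollection
    db62.dropDatabase();   // logged as "DROP DATABASE: db62" -> dropDatabase.start/dropDatabase

Because the collection is sharded, mongos takes the db62.coll62 distributed lock and tells each shard to drop its chunk metadata before the database itself goes away, which is exactly the sequence the next records show.
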
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.908-0400 m30999| 2015-07-09T14:15:58.907-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:58.907-0400-559eba5eca4787b9985d1e84", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465758907), what: "dropCollection.start", ns: "db62.coll62", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.940-0400 m30999| 2015-07-09T14:15:58.940-0400 I SHARDING [conn1] distributed lock 'db62.coll62/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba5eca4787b9985d1e85 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.941-0400 m31100| 2015-07-09T14:15:58.941-0400 I COMMAND [conn38] CMD: drop db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.945-0400 m31200| 2015-07-09T14:15:58.945-0400 I COMMAND [conn63] CMD: drop db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.946-0400 m31101| 2015-07-09T14:15:58.945-0400 I COMMAND [repl writer worker 3] CMD: drop db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.946-0400 m31102| 2015-07-09T14:15:58.945-0400 I COMMAND [repl writer worker 11] CMD: drop db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.949-0400 m31201| 2015-07-09T14:15:58.948-0400 I COMMAND [repl writer worker 15] CMD: drop db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:58.949-0400 m31202| 2015-07-09T14:15:58.949-0400 I COMMAND [repl writer worker 3] CMD: drop db62.coll62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.002-0400 m31100| 2015-07-09T14:15:59.001-0400 I SHARDING [conn38] remotely refreshing metadata for db62.coll62 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba5dca4787b9985d1e82, current metadata version is 2|3||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.003-0400 m31100| 2015-07-09T14:15:59.003-0400 W SHARDING [conn38] no chunks found when reloading db62.coll62, previous version was 0|0||559eba5dca4787b9985d1e82, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.004-0400 m31100| 2015-07-09T14:15:59.003-0400 I SHARDING [conn38] dropping metadata for db62.coll62 at shard version 2|3||559eba5dca4787b9985d1e82, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.005-0400 m31200| 2015-07-09T14:15:59.005-0400 I SHARDING [conn63] remotely refreshing metadata for db62.coll62 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba5dca4787b9985d1e82, current metadata version is 2|5||559eba5dca4787b9985d1e82 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.007-0400 m31200| 2015-07-09T14:15:59.006-0400 W SHARDING [conn63] no chunks found when reloading db62.coll62, previous version was 0|0||559eba5dca4787b9985d1e82, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.007-0400 m31200| 2015-07-09T14:15:59.007-0400 I SHARDING [conn63] dropping metadata for db62.coll62 at shard version 2|5||559eba5dca4787b9985d1e82, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.008-0400 m30999| 2015-07-09T14:15:59.008-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:59.008-0400-559eba5fca4787b9985d1e86", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465759008), what: "dropCollection", ns: "db62.coll62", details: {} } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:15:59.063-0400 m30999| 2015-07-09T14:15:59.063-0400 I SHARDING [conn1] distributed lock 'db62.coll62/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.119-0400 m30999| 2015-07-09T14:15:59.119-0400 I COMMAND [conn1] DROP DATABASE: db62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.119-0400 m30999| 2015-07-09T14:15:59.119-0400 I SHARDING [conn1] DBConfig::dropDatabase: db62 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.120-0400 m30999| 2015-07-09T14:15:59.119-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:59.119-0400-559eba5fca4787b9985d1e87", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465759119), what: "dropDatabase.start", ns: "db62", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.226-0400 m30999| 2015-07-09T14:15:59.225-0400 I SHARDING [conn1] DBConfig::dropDatabase: db62 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.226-0400 m31100| 2015-07-09T14:15:59.226-0400 I COMMAND [conn157] dropDatabase db62 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.226-0400 m31100| 2015-07-09T14:15:59.226-0400 I COMMAND [conn157] dropDatabase db62 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.228-0400 m30999| 2015-07-09T14:15:59.228-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:59.228-0400-559eba5fca4787b9985d1e88", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465759228), what: "dropDatabase", ns: "db62", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.229-0400 m31102| 2015-07-09T14:15:59.228-0400 I COMMAND [repl writer worker 0] dropDatabase db62 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.229-0400 m31102| 2015-07-09T14:15:59.229-0400 I COMMAND [repl writer worker 0] dropDatabase db62 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.229-0400 m31101| 2015-07-09T14:15:59.229-0400 I COMMAND [repl writer worker 13] dropDatabase db62 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.229-0400 m31101| 2015-07-09T14:15:59.229-0400 I COMMAND [repl writer worker 13] dropDatabase db62 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.318-0400 m31100| 2015-07-09T14:15:59.317-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.322-0400 m31102| 2015-07-09T14:15:59.321-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.322-0400 m31101| 2015-07-09T14:15:59.322-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.356-0400 m31200| 2015-07-09T14:15:59.355-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.358-0400 m31201| 2015-07-09T14:15:59.357-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.358-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.359-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.359-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.359-0400 jstests/concurrency/fsm_workloads/explain_group.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.359-0400 ---- 
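
The workload name is the only hint the log gives about what explain_group.js runs; presumably it issues explain on the group command against the sharded collection set up below. A hypothetical sketch of that shape of operation (not the workload's actual code; collection and field names are taken from the db63.coll63 setup that follows, and the explain-wrapping-group form is the 3.0+ explain command):

    // Illustrative only: explain a group command through mongos.
    var res = db.getSiblingDB("db63").runCommand({
        explain: {
            group: {
                ns: "coll63",
                key: { j: 1 },
                initial: { count: 0 },
                $reduce: function(doc, out) { out.count++; }
            }
        },
        verbosity: "queryPlanner"
    });
    assert.commandWorked(res);
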
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.359-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.359-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.360-0400 m31202| 2015-07-09T14:15:59.360-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.367-0400 m30999| 2015-07-09T14:15:59.367-0400 I SHARDING [conn1] distributed lock 'db63/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba5fca4787b9985d1e89 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.371-0400 m30999| 2015-07-09T14:15:59.371-0400 I SHARDING [conn1] Placing [db63] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.372-0400 m30999| 2015-07-09T14:15:59.371-0400 I SHARDING [conn1] Enabling sharding for database [db63] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.426-0400 m30999| 2015-07-09T14:15:59.426-0400 I SHARDING [conn1] distributed lock 'db63/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.448-0400 m31100| 2015-07-09T14:15:59.448-0400 I INDEX [conn145] build index on: db63.coll63 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db63.coll63" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.449-0400 m31100| 2015-07-09T14:15:59.448-0400 I INDEX [conn145] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.459-0400 m31100| 2015-07-09T14:15:59.459-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.460-0400 m30999| 2015-07-09T14:15:59.460-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db63.coll63", key: { j: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.465-0400 m30999| 2015-07-09T14:15:59.465-0400 I SHARDING [conn1] distributed lock 'db63.coll63/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba5fca4787b9985d1e8a [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.467-0400 m30999| 2015-07-09T14:15:59.466-0400 I SHARDING [conn1] enable sharding on: db63.coll63 with shard key: { j: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.468-0400 m30999| 2015-07-09T14:15:59.466-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:59.466-0400-559eba5fca4787b9985d1e8b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465759466), what: "shardCollection.start", ns: "db63.coll63", details: { shardKey: { j: 1.0 }, collection: "db63.coll63", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.472-0400 m31102| 2015-07-09T14:15:59.470-0400 I INDEX [repl writer worker 1] build index on: db63.coll63 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db63.coll63" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.472-0400 m31102| 2015-07-09T14:15:59.470-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.477-0400 m31102| 2015-07-09T14:15:59.476-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.480-0400 m31101| 2015-07-09T14:15:59.479-0400 I INDEX [repl writer worker 11] build index on: db63.coll63 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db63.coll63" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.480-0400 m31101| 2015-07-09T14:15:59.479-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.486-0400 m31101| 2015-07-09T14:15:59.486-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.519-0400 m30999| 2015-07-09T14:15:59.519-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db63.coll63 using new epoch 559eba5fca4787b9985d1e8c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.575-0400 m30999| 2015-07-09T14:15:59.574-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db63.coll63: 1ms sequenceNumber: 278 version: 1|0||559eba5fca4787b9985d1e8c based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.629-0400 m30999| 2015-07-09T14:15:59.628-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db63.coll63: 0ms sequenceNumber: 279 version: 1|0||559eba5fca4787b9985d1e8c based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.629-0400 m31100| 2015-07-09T14:15:59.629-0400 I SHARDING [conn188] remotely refreshing metadata for db63.coll63 with requested shard version 1|0||559eba5fca4787b9985d1e8c, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.631-0400 m31100| 2015-07-09T14:15:59.630-0400 I SHARDING [conn188] collection db63.coll63 was previously unsharded, new metadata loaded with shard version 1|0||559eba5fca4787b9985d1e8c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.631-0400 m31100| 2015-07-09T14:15:59.631-0400 I SHARDING [conn188] collection version was loaded at version 1|0||559eba5fca4787b9985d1e8c, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.631-0400 m30999| 2015-07-09T14:15:59.631-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:59.631-0400-559eba5fca4787b9985d1e8d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465759631), what: "shardCollection", ns: "db63.coll63", details: { version: "1|0||559eba5fca4787b9985d1e8c" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.686-0400 m30999| 2015-07-09T14:15:59.685-0400 I SHARDING [conn1] distributed lock 'db63.coll63/bs-osx108-8:30999:1436464534:16807' unlocked. 
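
The setup just logged follows the suite's standard pattern: mongos places db63 on test-rs0 and enables sharding, the primary shard builds the { j: 1 } index (and the secondaries replicate the build), then the collection is sharded on that key with a single initial chunk. A minimal sketch of the same sequence from the shell, assuming a connection to one of the mongoses:

    // Reproduce the db63.coll63 setup the log shows.
    var admin = db.getSiblingDB("admin");
    assert.commandWorked(admin.runCommand({ enableSharding: "db63" }));
    // The shard key must be backed by an index; the test builds it up front.
    assert.commandWorked(db.getSiblingDB("db63").coll63.createIndex({ j: 1 }));
    assert.commandWorked(admin.runCommand({ shardCollection: "db63.coll63",
                                            key: { j: 1 } }));
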
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.686-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.789-0400 m30998| 2015-07-09T14:15:59.789-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63930 #407 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.803-0400 m30998| 2015-07-09T14:15:59.803-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63931 #408 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.816-0400 m30999| 2015-07-09T14:15:59.816-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63932 #407 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.826-0400 m30998| 2015-07-09T14:15:59.826-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63935 #409 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.826-0400 m30999| 2015-07-09T14:15:59.826-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63933 #408 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.827-0400 m30998| 2015-07-09T14:15:59.826-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63936 #410 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.827-0400 m30999| 2015-07-09T14:15:59.827-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63934 #409 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.829-0400 m30998| 2015-07-09T14:15:59.829-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63937 #411 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.835-0400 m30999| 2015-07-09T14:15:59.835-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63938 #410 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.838-0400 m30999| 2015-07-09T14:15:59.838-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63939 #411 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.847-0400 setting random seed: 5383744379505 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.847-0400 setting random seed: 6069985479116 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.847-0400 setting random seed: 438233101740 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.847-0400 setting random seed: 5356357339769 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.848-0400 setting random seed: 4297851426526 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.848-0400 setting random seed: 8245995012111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.849-0400 setting random seed: 4485949683003 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.849-0400 setting random seed: 4591828482225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.854-0400 setting random seed: 3265072288922 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.855-0400 setting random seed: 4861336699686 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.857-0400 m30998| 2015-07-09T14:15:59.857-0400 I SHARDING [conn407] ChunkManager: time to load chunks for db63.coll63: 0ms sequenceNumber: 75 version: 1|0||559eba5fca4787b9985d1e8c based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.896-0400 m31100| 2015-07-09T14:15:59.895-0400 I SHARDING [conn38] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : 
MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.896-0400 m31100| 2015-07-09T14:15:59.896-0400 I SHARDING [conn15] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.900-0400 m31100| 2015-07-09T14:15:59.900-0400 I SHARDING [conn39] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.902-0400 m31100| 2015-07-09T14:15:59.902-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.903-0400 m31100| 2015-07-09T14:15:59.903-0400 I SHARDING [conn35] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.904-0400 m31100| 2015-07-09T14:15:59.903-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.904-0400 m31100| 2015-07-09T14:15:59.904-0400 I SHARDING [conn36] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.905-0400 m31100| 2015-07-09T14:15:59.904-0400 I SHARDING [conn39] distributed lock 'db63.coll63/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba5f792e00bb67274a77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.905-0400 m31100| 2015-07-09T14:15:59.904-0400 I SHARDING [conn39] remotely refreshing metadata for db63.coll63 based on current shard version 1|0||559eba5fca4787b9985d1e8c, current metadata version is 1|0||559eba5fca4787b9985d1e8c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.906-0400 m31100| 2015-07-09T14:15:59.905-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.906-0400 m31100| 2015-07-09T14:15:59.905-0400 W SHARDING [conn35] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.906-0400 m30998| 2015-07-09T14:15:59.906-0400 W SHARDING [conn411] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.907-0400 m31100| 2015-07-09T14:15:59.907-0400 I SHARDING [conn39] metadata of collection db63.coll63 already up to date (shard version : 1|0||559eba5fca4787b9985d1e8c, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.908-0400 m31100| 2015-07-09T14:15:59.907-0400 I SHARDING [conn39] splitChunk accepted at version 1|0||559eba5fca4787b9985d1e8c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.908-0400 m31100| 2015-07-09T14:15:59.907-0400 W SHARDING [conn36] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.908-0400 m30998| 2015-07-09T14:15:59.908-0400 W SHARDING [conn407] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.913-0400 m31100| 2015-07-09T14:15:59.913-0400 I SHARDING [conn15] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.914-0400 m31100| 2015-07-09T14:15:59.913-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.921-0400 m31100| 2015-07-09T14:15:59.921-0400 I SHARDING [conn38] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.922-0400 m31100| 2015-07-09T14:15:59.921-0400 I SHARDING [conn40] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.922-0400 m31100| 2015-07-09T14:15:59.922-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.923-0400 m31100| 2015-07-09T14:15:59.923-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { 
j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.925-0400 m31100| 2015-07-09T14:15:59.925-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:59.925-0400-559eba5f792e00bb67274a78", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465759925), what: "multi-split", ns: "db63.coll63", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 1, of: 3, chunk: { min: { j: MinKey }, max: { j: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eba5fca4787b9985d1e8c') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.926-0400 m31100| 2015-07-09T14:15:59.925-0400 I SHARDING [conn35] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.926-0400 m31100| 2015-07-09T14:15:59.925-0400 I SHARDING [conn36] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.926-0400 m31100| 2015-07-09T14:15:59.925-0400 W SHARDING [conn40] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.927-0400 m30999| 2015-07-09T14:15:59.925-0400 I SHARDING [conn410] ChunkManager: time to load chunks for db63.coll63: 0ms sequenceNumber: 280 version: 1|3||559eba5fca4787b9985d1e8c based on: 1|0||559eba5fca4787b9985d1e8c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.927-0400 m30999| 2015-07-09T14:15:59.926-0400 W SHARDING [conn407] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.927-0400 m31100| 2015-07-09T14:15:59.926-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.927-0400 m31100| 2015-07-09T14:15:59.927-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.928-0400 m31100| 2015-07-09T14:15:59.927-0400 W SHARDING [conn38] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.928-0400 m31100| 2015-07-09T14:15:59.927-0400 W SHARDING [conn35] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.929-0400 m31100| 2015-07-09T14:15:59.927-0400 W SHARDING [conn15] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.929-0400 m30998| 2015-07-09T14:15:59.928-0400 W SHARDING [conn411] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.929-0400 m30999| 2015-07-09T14:15:59.928-0400 W SHARDING [conn408] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.930-0400 m30999| 2015-07-09T14:15:59.928-0400 W SHARDING [conn409] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.930-0400 m31100| 2015-07-09T14:15:59.930-0400 W SHARDING [conn36] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.931-0400 m30998| 2015-07-09T14:15:59.930-0400 W SHARDING [conn408] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.941-0400 m31100| 2015-07-09T14:15:59.941-0400 I SHARDING [conn36] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.942-0400 m31100| 2015-07-09T14:15:59.942-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.943-0400 m31100| 2015-07-09T14:15:59.943-0400 I SHARDING [conn35] request split points lookup for chunk db63.coll63 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.944-0400 m31100| 2015-07-09T14:15:59.943-0400 W SHARDING [conn36] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.945-0400 m30998| 2015-07-09T14:15:59.944-0400 W SHARDING [conn411] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.946-0400 m31100| 2015-07-09T14:15:59.944-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.946-0400 m31100| 2015-07-09T14:15:59.945-0400 W SHARDING [conn35] could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db63.coll63 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.946-0400 m30998| 2015-07-09T14:15:59.945-0400 W SHARDING [conn408] splitChunk failed - cmd: { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db63.coll63 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.949-0400 m30998| 2015-07-09T14:15:59.949-0400 I SHARDING [conn408] ChunkManager: time to load chunks for db63.coll63: 0ms sequenceNumber: 76 version: 1|3||559eba5fca4787b9985d1e8c based on: 1|0||559eba5fca4787b9985d1e8c [js_test:fsm_all_sharded_replication] 2015-07-09T14:15:59.979-0400 m31100| 2015-07-09T14:15:59.977-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:15:59.977-0400-559eba5f792e00bb67274a79", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465759977), what: "multi-split", ns: "db63.coll63", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 2, of: 3, chunk: { min: { j: 0.0 }, max: { j: 6.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eba5fca4787b9985d1e8c') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.033-0400 m31100| 2015-07-09T14:16:00.033-0400 I SHARDING [conn39] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:00.033-0400-559eba60792e00bb67274a7a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62641", time: new Date(1436465760033), what: "multi-split", ns: "db63.coll63", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 3, of: 3, chunk: { min: { j: 6.0 }, max: { j: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eba5fca4787b9985d1e8c') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.089-0400 m31100| 2015-07-09T14:16:00.089-0400 I SHARDING [conn39] distributed lock 'db63.coll63/bs-osx108-8:31100:1436464536:197041335' unlocked. 
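
All the worker threads trip the autosplit size check at nearly the same moment, so both mongoses ask the shard to split the same [{ j: MinKey }, { j: MaxKey }) chunk; only conn39 wins the per-collection distributed lock, and every other attempt fails with code 125 (the "Lock for splitting chunk ... is taken" warnings above). That failure is benign here, since the winner performs the multi-split. A sketch of how a caller issuing its own split through mongos might tolerate the same race, assuming a shell connected to a mongos (the split point { j: 4 } is one of the values in the logged splitKeys):

    // Split a chunk via mongos, retrying while another splitter
    // holds the collection lock (error code 125, LockBusy).
    function splitWithRetry(ns, middle) {
        for (var attempt = 0; attempt < 10; attempt++) {
            var res = db.adminCommand({ split: ns, middle: middle });
            if (res.ok) {
                return res;
            }
            if (res.code !== 125) {
                throw Error("split failed: " + tojson(res)); // not a lock conflict
            }
            sleep(100); // back off; the current holder may split for us
        }
        throw Error("split " + ns + " still lock-busy after retries");
    }
    splitWithRetry("db63.coll63", { j: 4 });
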
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.090-0400 m31100| 2015-07-09T14:16:00.089-0400 I COMMAND [conn39] command db63.coll63 command: splitChunk { splitChunk: "db63.coll63", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba5fca4787b9985d1e8c') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 1432 } } } protocol:op_command 187ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.090-0400 m30998| 2015-07-09T14:16:00.090-0400 I SHARDING [conn410] autosplitted db63.coll63 shard: ns: db63.coll63, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { j: MinKey }, max: { j: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.171-0400 m30999| 2015-07-09T14:16:00.166-0400 I NETWORK [conn409] end connection 127.0.0.1:63934 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.175-0400 m30998| 2015-07-09T14:16:00.175-0400 I NETWORK [conn408] end connection 127.0.0.1:63931 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.178-0400 m30999| 2015-07-09T14:16:00.178-0400 I NETWORK [conn407] end connection 127.0.0.1:63932 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.184-0400 m30998| 2015-07-09T14:16:00.183-0400 I NETWORK [conn407] end connection 127.0.0.1:63930 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.211-0400 m30998| 2015-07-09T14:16:00.211-0400 I NETWORK [conn409] end connection 127.0.0.1:63935 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.212-0400 m30999| 2015-07-09T14:16:00.211-0400 I NETWORK [conn408] end connection 127.0.0.1:63933 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.213-0400 m30998| 2015-07-09T14:16:00.213-0400 I NETWORK [conn411] end connection 127.0.0.1:63937 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.237-0400 m30999| 2015-07-09T14:16:00.236-0400 I NETWORK [conn410] end connection 127.0.0.1:63938 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.243-0400 m30999| 2015-07-09T14:16:00.243-0400 I NETWORK [conn411] end connection 127.0.0.1:63939 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.293-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.293-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.294-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.294-0400 jstests/concurrency/fsm_workloads/explain_group.js: Workload completed in 607 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.294-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.294-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.294-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.294-0400 m30998| 2015-07-09T14:16:00.293-0400 I NETWORK [conn410] end connection 127.0.0.1:63936 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.294-0400 m30999| 2015-07-09T14:16:00.293-0400 I COMMAND [conn1] DROP: db63.coll63 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:00.294-0400 m30999| 2015-07-09T14:16:00.293-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:00.293-0400-559eba60ca4787b9985d1e8e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465760293), what: "dropCollection.start", ns: "db63.coll63", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.353-0400 m30999| 2015-07-09T14:16:00.353-0400 I SHARDING [conn1] distributed lock 'db63.coll63/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba60ca4787b9985d1e8f [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.355-0400 m31100| 2015-07-09T14:16:00.354-0400 I COMMAND [conn15] CMD: drop db63.coll63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.357-0400 m31200| 2015-07-09T14:16:00.356-0400 I COMMAND [conn63] CMD: drop db63.coll63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.358-0400 m31101| 2015-07-09T14:16:00.358-0400 I COMMAND [repl writer worker 8] CMD: drop db63.coll63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.359-0400 m31102| 2015-07-09T14:16:00.358-0400 I COMMAND [repl writer worker 9] CMD: drop db63.coll63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.413-0400 m31100| 2015-07-09T14:16:00.412-0400 I SHARDING [conn15] remotely refreshing metadata for db63.coll63 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eba5fca4787b9985d1e8c, current metadata version is 1|3||559eba5fca4787b9985d1e8c [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.415-0400 m31100| 2015-07-09T14:16:00.414-0400 W SHARDING [conn15] no chunks found when reloading db63.coll63, previous version was 0|0||559eba5fca4787b9985d1e8c, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.415-0400 m31100| 2015-07-09T14:16:00.414-0400 I SHARDING [conn15] dropping metadata for db63.coll63 at shard version 1|3||559eba5fca4787b9985d1e8c, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.417-0400 m30999| 2015-07-09T14:16:00.416-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:00.416-0400-559eba60ca4787b9985d1e90", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465760416), what: "dropCollection", ns: "db63.coll63", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.470-0400 m30999| 2015-07-09T14:16:00.470-0400 I SHARDING [conn1] distributed lock 'db63.coll63/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.526-0400 m30999| 2015-07-09T14:16:00.525-0400 I COMMAND [conn1] DROP DATABASE: db63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.526-0400 m30999| 2015-07-09T14:16:00.526-0400 I SHARDING [conn1] DBConfig::dropDatabase: db63 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.526-0400 m30999| 2015-07-09T14:16:00.526-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:00.526-0400-559eba60ca4787b9985d1e91", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465760526), what: "dropDatabase.start", ns: "db63", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.632-0400 m30999| 2015-07-09T14:16:00.632-0400 I SHARDING [conn1] DBConfig::dropDatabase: db63 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.633-0400 m31100| 2015-07-09T14:16:00.632-0400 I COMMAND [conn157] dropDatabase db63 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.633-0400 m31100| 2015-07-09T14:16:00.632-0400 I COMMAND [conn157] dropDatabase db63 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.633-0400 m30999| 2015-07-09T14:16:00.633-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:00.633-0400-559eba60ca4787b9985d1e92", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465760633), what: "dropDatabase", ns: "db63", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.634-0400 m31102| 2015-07-09T14:16:00.633-0400 I COMMAND [repl writer worker 8] dropDatabase db63 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.634-0400 m31101| 2015-07-09T14:16:00.633-0400 I COMMAND [repl writer worker 6] dropDatabase db63 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.634-0400 m31102| 2015-07-09T14:16:00.633-0400 I COMMAND [repl writer worker 8] dropDatabase db63 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.634-0400 m31101| 2015-07-09T14:16:00.633-0400 I COMMAND [repl writer worker 6] dropDatabase db63 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.723-0400 m31100| 2015-07-09T14:16:00.722-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.727-0400 m31101| 2015-07-09T14:16:00.726-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.727-0400 m31102| 2015-07-09T14:16:00.726-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.755-0400 m31200| 2015-07-09T14:16:00.754-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.758-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.758-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.758-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.758-0400 jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.759-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.759-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.759-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.759-0400 m31201| 2015-07-09T14:16:00.758-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:00.759-0400 m31202| 2015-07-09T14:16:00.758-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.766-0400 m30999| 2015-07-09T14:16:00.765-0400 I SHARDING [conn1] distributed lock 'db64/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba60ca4787b9985d1e93 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.769-0400 m30999| 2015-07-09T14:16:00.769-0400 I SHARDING [conn1] Placing [db64] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.770-0400 m30999| 2015-07-09T14:16:00.769-0400 I SHARDING [conn1] Enabling sharding for database [db64] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.825-0400 m30999| 2015-07-09T14:16:00.824-0400 I SHARDING [conn1] distributed lock 'db64/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.849-0400 m31100| 2015-07-09T14:16:00.848-0400 I INDEX [conn68] build index on: db64.coll64 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db64.coll64" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.850-0400 m31100| 2015-07-09T14:16:00.848-0400 I INDEX [conn68] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.860-0400 m31100| 2015-07-09T14:16:00.859-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.861-0400 m30999| 2015-07-09T14:16:00.861-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db64.coll64", key: { indexed_insert_heterogeneous: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.864-0400 m30999| 2015-07-09T14:16:00.864-0400 I SHARDING [conn1] distributed lock 'db64.coll64/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba60ca4787b9985d1e94 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.865-0400 m30999| 2015-07-09T14:16:00.865-0400 I SHARDING [conn1] enable sharding on: db64.coll64 with shard key: { indexed_insert_heterogeneous: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.866-0400 m30999| 2015-07-09T14:16:00.865-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:00.865-0400-559eba60ca4787b9985d1e95", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465760865), what: "shardCollection.start", ns: "db64.coll64", details: { shardKey: { indexed_insert_heterogeneous: 1.0 }, collection: "db64.coll64", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.869-0400 m31101| 2015-07-09T14:16:00.869-0400 I INDEX [repl writer worker 1] build index on: db64.coll64 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db64.coll64" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.870-0400 m31101| 2015-07-09T14:16:00.869-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.873-0400 m31102| 2015-07-09T14:16:00.872-0400 I INDEX [repl writer worker 5] build index on: db64.coll64 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db64.coll64" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.873-0400 m31102| 2015-07-09T14:16:00.872-0400 I INDEX [repl 
writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.876-0400 m31101| 2015-07-09T14:16:00.876-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.879-0400 m31102| 2015-07-09T14:16:00.879-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.920-0400 m30999| 2015-07-09T14:16:00.919-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db64.coll64 using new epoch 559eba60ca4787b9985d1e96 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:00.974-0400 m30999| 2015-07-09T14:16:00.973-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db64.coll64: 0ms sequenceNumber: 281 version: 1|0||559eba60ca4787b9985d1e96 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.030-0400 m30999| 2015-07-09T14:16:01.030-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db64.coll64: 1ms sequenceNumber: 282 version: 1|0||559eba60ca4787b9985d1e96 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.032-0400 m31100| 2015-07-09T14:16:01.032-0400 I SHARDING [conn45] remotely refreshing metadata for db64.coll64 with requested shard version 1|0||559eba60ca4787b9985d1e96, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.034-0400 m31100| 2015-07-09T14:16:01.034-0400 I SHARDING [conn45] collection db64.coll64 was previously unsharded, new metadata loaded with shard version 1|0||559eba60ca4787b9985d1e96 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.034-0400 m31100| 2015-07-09T14:16:01.034-0400 I SHARDING [conn45] collection version was loaded at version 1|0||559eba60ca4787b9985d1e96, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.035-0400 m30999| 2015-07-09T14:16:01.034-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:01.034-0400-559eba61ca4787b9985d1e97", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465761034), what: "shardCollection", ns: "db64.coll64", details: { version: "1|0||559eba60ca4787b9985d1e96" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.089-0400 m30999| 2015-07-09T14:16:01.088-0400 I SHARDING [conn1] distributed lock 'db64.coll64/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.108-0400 m31200| 2015-07-09T14:16:01.107-0400 I INDEX [conn39] build index on: db64.coll64 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db64.coll64" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.108-0400 m31200| 2015-07-09T14:16:01.107-0400 I INDEX [conn39] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.117-0400 m31200| 2015-07-09T14:16:01.117-0400 I INDEX [conn39] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.119-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.286-0400 m31201| 2015-07-09T14:16:01.285-0400 I INDEX [repl writer worker 8] build index on: db64.coll64 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db64.coll64" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.286-0400 m31201| 2015-07-09T14:16:01.285-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.319-0400 m30999| 2015-07-09T14:16:01.319-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63941 #412 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.330-0400 m31201| 2015-07-09T14:16:01.329-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.344-0400 m31202| 2015-07-09T14:16:01.343-0400 I INDEX [repl writer worker 9] build index on: db64.coll64 properties: { v: 1, key: { indexed_insert_heterogeneous: 1.0 }, name: "indexed_insert_heterogeneous_1", ns: "db64.coll64" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.344-0400 m31202| 2015-07-09T14:16:01.344-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.349-0400 m30998| 2015-07-09T14:16:01.349-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63942 #412 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.351-0400 m30998| 2015-07-09T14:16:01.351-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63943 #413 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.352-0400 m30999| 2015-07-09T14:16:01.352-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63944 #413 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.362-0400 m31202| 2015-07-09T14:16:01.362-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.367-0400 m30999| 2015-07-09T14:16:01.366-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63945 #414 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.368-0400 m30998| 2015-07-09T14:16:01.367-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63946 #414 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.370-0400 m30999| 2015-07-09T14:16:01.367-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63947 #415 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.370-0400 m30999| 2015-07-09T14:16:01.368-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63948 #416 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.370-0400 m30998| 2015-07-09T14:16:01.369-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63951 #415 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.372-0400 m30999| 2015-07-09T14:16:01.372-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63949 #417 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.374-0400 m30998| 2015-07-09T14:16:01.374-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63952 #416 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.381-0400 m30999| 2015-07-09T14:16:01.381-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63950 #418 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.383-0400 m30998| 2015-07-09T14:16:01.382-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63954 #417 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.384-0400 m30999| 2015-07-09T14:16:01.384-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63953 #419 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.385-0400 m30998| 2015-07-09T14:16:01.384-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63956 #418 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.390-0400 m30999| 2015-07-09T14:16:01.389-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63955 #420 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.390-0400 m30998| 2015-07-09T14:16:01.390-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63957 #419 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.394-0400 m30999| 2015-07-09T14:16:01.393-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63959 #421 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.394-0400 m30998| 2015-07-09T14:16:01.394-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63958 #420 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.397-0400 m30998| 2015-07-09T14:16:01.396-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63960 #421 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.405-0400 setting random seed: 5098691848106
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.405-0400 setting random seed: 7555137285962
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.406-0400 setting random seed: 271019302308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.407-0400 setting random seed: 3713931031525
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.410-0400 setting random seed: 4501577285118
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.412-0400 setting random seed: 9192424300126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.414-0400 m30998| 2015-07-09T14:16:01.413-0400 I SHARDING [conn416] ChunkManager: time to load chunks for db64.coll64: 0ms sequenceNumber: 77 version: 1|0||559eba60ca4787b9985d1e96 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.415-0400 setting random seed: 4244514494203
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.416-0400 setting random seed: 8009466212242
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.419-0400 setting random seed: 38875909522
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.420-0400 setting random seed: 880176634527
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.424-0400 setting random seed: 4939065873622
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.426-0400 setting random seed: 934402425773
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.429-0400 setting random seed: 3988959272392
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.446-0400 setting random seed: 4301100340671
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.447-0400 setting random seed: 2438522311858
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.447-0400 setting random seed: 5021416018716
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.448-0400 setting random seed: 3128703394904
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.451-0400 setting random seed: 8593459329567
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.453-0400 setting random seed: 7075284169986
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.461-0400 m31100| 2015-07-09T14:16:01.459-0400 I SHARDING [conn15] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.461-0400 setting random seed: 4117982648313
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.475-0400 m31100| 2015-07-09T14:16:01.474-0400 I SHARDING [conn15] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.476-0400 m31100| 2015-07-09T14:16:01.475-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: new Date(946684819000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.477-0400 m31100| 2015-07-09T14:16:01.476-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.477-0400 m31100| 2015-07-09T14:16:01.476-0400 I SHARDING [conn39] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.478-0400 m31100| 2015-07-09T14:16:01.477-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: new Date(946684805000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.479-0400 m31100| 2015-07-09T14:16:01.477-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: new Date(946684805000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.480-0400 m31100| 2015-07-09T14:16:01.479-0400 W SHARDING [conn39] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.481-0400 m30998| 2015-07-09T14:16:01.479-0400 W SHARDING [conn420] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: new Date(946684805000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.481-0400 m31100| 2015-07-09T14:16:01.479-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
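The "setting random seed" lines above are printed by the shell's Random.setRandomSeed(), which each of the 20 FSM worker threads calls before generating documents. In this workload the inserted shard-key values deliberately span many BSON types, which is why the splitKeys proposed below mix numbers, strings, objects, ObjectIds, Dates, and functions. A minimal sketch of that insert pattern, not the test's actual code (the collection name is from the log; the values are examples taken from the splitKeys):

// Sketch only. Seeds the shell PRNG the same way the log lines above show.
Random.setRandomSeed();
var coll = db.getSiblingDB("db64").coll64;
// One value per BSON type seen in the splitKeys in this log:
[ 7.0, "8", { tid: 10.0 },
  ObjectId('000000000000000000000012'),
  new Date(946684812000),
  function anonymous() { return 6; } ].forEach(function(v) {
    assert.writeOK(coll.insert({ indexed_insert_heterogeneous: v }));
});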
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.481-0400 m30999| 2015-07-09T14:16:01.480-0400 W SHARDING [conn421] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: new Date(946684805000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.487-0400 m31100| 2015-07-09T14:16:01.487-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.489-0400 m31100| 2015-07-09T14:16:01.487-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.489-0400 m31100| 2015-07-09T14:16:01.488-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.489-0400 m31100| 2015-07-09T14:16:01.489-0400 I SHARDING [conn15] distributed lock 'db64.coll64/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba61792e00bb67274a7c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.490-0400 m31100| 2015-07-09T14:16:01.489-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.490-0400 m31100| 2015-07-09T14:16:01.489-0400 I SHARDING [conn15] remotely refreshing metadata for db64.coll64 based on current shard version 1|0||559eba60ca4787b9985d1e96, current metadata version is 1|0||559eba60ca4787b9985d1e96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.491-0400 m30999| 2015-07-09T14:16:01.489-0400 W SHARDING [conn417] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.492-0400 m31100| 2015-07-09T14:16:01.489-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.492-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.492-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.492-0400 m31100| 2015-07-09T14:16:01.490-0400 I SHARDING [conn39] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.492-0400 m31100| 2015-07-09T14:16:01.491-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
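The splitChunk requests above are internal commands that each mongos's auto-splitter sends directly to the shard primary (m31100) after a split-points lookup. The user-facing equivalent is the split admin command issued through mongos; a hedged sketch using one of the split points from the log:

// Sketch: ask mongos to split db64.coll64 at a single point. This reaches
// the shard as a splitChunk request like the ones logged above.
var res = db.getSiblingDB("admin").runCommand({
    split: "db64.coll64",
    middle: { indexed_insert_heterogeneous: 0.0 }
});
printjson(res);  // { ok: 0, code: 125, ... } while another split holds the lock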
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.492-0400 m30999| 2015-07-09T14:16:01.491-0400 W SHARDING [conn413] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.493-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.493-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.498-0400 m31100| 2015-07-09T14:16:01.497-0400 I SHARDING [conn15] metadata of collection db64.coll64 already up to date (shard version : 1|0||559eba60ca4787b9985d1e96, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.499-0400 m31100| 2015-07-09T14:16:01.497-0400 I SHARDING [conn35] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.500-0400 m31100| 2015-07-09T14:16:01.497-0400 I SHARDING [conn15] splitChunk accepted at version 1|0||559eba60ca4787b9985d1e96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.500-0400 m31100| 2015-07-09T14:16:01.497-0400 I SHARDING [conn36] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.500-0400 m31100| 2015-07-09T14:16:01.497-0400 I SHARDING [conn32] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.501-0400 m31100| 2015-07-09T14:16:01.498-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.501-0400 m31100| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.501-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.502-0400 m31100| 2015-07-09T14:16:01.500-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: new Date(946684812000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.502-0400 m31100| 2015-07-09T14:16:01.501-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: new Date(946684812000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.502-0400 m31100| 2015-07-09T14:16:01.501-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: new Date(946684812000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.508-0400 m31100| 2015-07-09T14:16:01.507-0400 W SHARDING [conn36] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.512-0400 m30998| 2015-07-09T14:16:01.508-0400 W SHARDING [conn417] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: new Date(946684812000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.512-0400 m31100| 2015-07-09T14:16:01.508-0400 W SHARDING [conn39] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.512-0400 m31100| 2015-07-09T14:16:01.508-0400 W SHARDING [conn35] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.512-0400 m31100| 2015-07-09T14:16:01.508-0400 W SHARDING [conn32] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.513-0400 m30998| 2015-07-09T14:16:01.509-0400 W SHARDING [conn412] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.513-0400 m30998| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.513-0400 m30998| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.514-0400 m30998| 2015-07-09T14:16:01.509-0400 W SHARDING [conn414] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: new Date(946684812000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.514-0400 m30998| 2015-07-09T14:16:01.509-0400 W SHARDING [conn415] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: new Date(946684812000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.514-0400 m30998| 2015-07-09T14:16:01.513-0400 I SHARDING [conn412] ChunkManager: time to load chunks for db64.coll64: 0ms sequenceNumber: 78 version: 1|3||559eba60ca4787b9985d1e96 based on: 1|0||559eba60ca4787b9985d1e96
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.518-0400 m31100| 2015-07-09T14:16:01.515-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.519-0400 m31100| 2015-07-09T14:16:01.515-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.519-0400 m31100| 2015-07-09T14:16:01.515-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:01.515-0400-559eba61792e00bb67274a7d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465761515), what: "multi-split", ns: "db64.coll64", details: { before: { min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey } }, number: 1, of: 3, chunk: { min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eba60ca4787b9985d1e96') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.519-0400 m31100| 2015-07-09T14:16:01.515-0400 I SHARDING [conn132] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.519-0400 m31100| 2015-07-09T14:16:01.515-0400 I SHARDING [conn37] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.520-0400 m31100| 2015-07-09T14:16:01.515-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.522-0400 m31100| 2015-07-09T14:16:01.518-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.522-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.522-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.524-0400 m31100| 2015-07-09T14:16:01.519-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.524-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.525-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.525-0400 m31100| 2015-07-09T14:16:01.519-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.525-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.526-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.526-0400 m31100| 2015-07-09T14:16:01.519-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.526-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.527-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.527-0400 m31100| 2015-07-09T14:16:01.520-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.527-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.528-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.528-0400 m31100| 2015-07-09T14:16:01.521-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.528-0400 m31100| 2015-07-09T14:16:01.521-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.529-0400 m31100| 2015-07-09T14:16:01.521-0400 W SHARDING [conn132] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.529-0400 m31100| 2015-07-09T14:16:01.521-0400 W SHARDING [conn37] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.529-0400 m31100| 2015-07-09T14:16:01.521-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.530-0400 m30999| 2015-07-09T14:16:01.522-0400 W SHARDING [conn417] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.530-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.530-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.531-0400 m30998| 2015-07-09T14:16:01.521-0400 W SHARDING [conn420] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.531-0400 m30999| 2015-07-09T14:16:01.522-0400 W SHARDING [conn418] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.531-0400 m30998| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.532-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.532-0400 m30998| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.534-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.534-0400 m30999| 2015-07-09T14:16:01.522-0400 W SHARDING [conn414] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.535-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.535-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.551-0400 m30999| 2015-07-09T14:16:01.522-0400 W SHARDING [conn419] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.552-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.557-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.557-0400 m31100| 2015-07-09T14:16:01.537-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.557-0400 m31100| 2015-07-09T14:16:01.537-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.559-0400 m31100| 2015-07-09T14:16:01.538-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.559-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.560-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.561-0400 m31100| 2015-07-09T14:16:01.538-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.561-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.571-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.571-0400 m31100| 2015-07-09T14:16:01.539-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.572-0400 m31100| 2015-07-09T14:16:01.539-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.572-0400 m30999| 2015-07-09T14:16:01.539-0400 W SHARDING [conn413] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.572-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.572-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.573-0400 m30999| 2015-07-09T14:16:01.540-0400 W SHARDING [conn414] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.573-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.575-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.575-0400 m31100| 2015-07-09T14:16:01.547-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.577-0400 m31100| 2015-07-09T14:16:01.548-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.577-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.578-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.578-0400 m31100| 2015-07-09T14:16:01.550-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.579-0400 m30999| 2015-07-09T14:16:01.550-0400 W SHARDING [conn415] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.579-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.581-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.590-0400 m31100| 2015-07-09T14:16:01.557-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.590-0400 m31100| 2015-07-09T14:16:01.557-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.591-0400 m31100| 2015-07-09T14:16:01.558-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.591-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.591-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.591-0400 m31100| 2015-07-09T14:16:01.558-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.592-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.593-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.593-0400 m31100| 2015-07-09T14:16:01.559-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.594-0400 m30999| 2015-07-09T14:16:01.560-0400 W SHARDING [conn416] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.594-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.594-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.594-0400 m31100| 2015-07-09T14:16:01.560-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
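Note that every splitKeys array is listed in cross-type BSON sort order: numbers first, then strings, then objects, then ObjectIds, then Dates, with the Code values (the anonymous functions) last; this is the order the shard sees when it scans the index for split points. The ordering is easy to reproduce; a small sketch using a hypothetical scratch collection:

// ordering_demo is a throwaway collection used only for this demonstration.
var t = db.getSiblingDB("db64").ordering_demo;
t.drop();
[ new Date(946684805000), "8", 14.0, { tid: 3.0 },
  ObjectId('000000000000000000000004') ].forEach(function(v) {
    t.insert({ k: v });
});
// Prints number, string, object, ObjectId, Date -- matching the splitKeys order.
t.find({}, { _id: 0 }).sort({ k: 1 }).forEach(printjson);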
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.595-0400 m30999| 2015-07-09T14:16:01.560-0400 W SHARDING [conn421] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.595-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.595-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.595-0400 m31100| 2015-07-09T14:16:01.570-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.595-0400 m31100| 2015-07-09T14:16:01.570-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.595-0400 m31100| 2015-07-09T14:16:01.573-0400 I SHARDING [conn37] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.596-0400 m31100| 2015-07-09T14:16:01.573-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.596-0400 m31100| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.596-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.596-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.596-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.597-0400 m31100| 2015-07-09T14:16:01.573-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.597-0400 m31100| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.597-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.604-0400 m31100| 2015-07-09T14:16:01.574-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.604-0400 m31100| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.611-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.611-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.611-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.611-0400 m31100| 2015-07-09T14:16:01.575-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:01.575-0400-559eba61792e00bb67274a7e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465761575), what: "multi-split", ns: "db64.coll64", details: { before: { min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey } }, number: 2, of: 3, chunk: { min: { indexed_insert_heterogeneous: 0.0 }, max: { indexed_insert_heterogeneous: new Date(946684819000) }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eba60ca4787b9985d1e96') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.611-0400 m31100| 2015-07-09T14:16:01.575-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.612-0400 m31100| 2015-07-09T14:16:01.575-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.612-0400 m31100| 2015-07-09T14:16:01.576-0400 W SHARDING [conn37] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.613-0400 m30999| 2015-07-09T14:16:01.576-0400 W SHARDING [conn413] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.613-0400 m30999| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.614-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.614-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.614-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.614-0400 m30999| 2015-07-09T14:16:01.576-0400 W SHARDING [conn415] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.614-0400 m30999| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.615-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.615-0400 m30999| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.615-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.615-0400 m31100| 2015-07-09T14:16:01.576-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.616-0400 m30999| 2015-07-09T14:16:01.577-0400 W SHARDING [conn414] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.616-0400 m30999| return 13;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.617-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.633-0400 m31100| 2015-07-09T14:16:01.577-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.634-0400 m31100| return 6;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.634-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.634-0400 m31100| 2015-07-09T14:16:01.579-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.635-0400 m30999| 2015-07-09T14:16:01.579-0400 W SHARDING [conn419] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.635-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.635-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.635-0400 m31100| 2015-07-09T14:16:01.582-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.637-0400 m31100| 2015-07-09T14:16:01.584-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.637-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.637-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.637-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.638-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.638-0400 m31100| 2015-07-09T14:16:01.586-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.638-0400 m30999| 2015-07-09T14:16:01.586-0400 W SHARDING [conn417] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.638-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.638-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.639-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.639-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.639-0400 m31100| 2015-07-09T14:16:01.591-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.639-0400 m31100| 2015-07-09T14:16:01.598-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.640-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.640-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.640-0400 m31100| 2015-07-09T14:16:01.599-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.640-0400 m31100| 2015-07-09T14:16:01.599-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.640-0400 m31100| 2015-07-09T14:16:01.600-0400 W SHARDING [conn40] could not 
acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.641-0400 m30999| 2015-07-09T14:16:01.600-0400 W SHARDING [conn414] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.641-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.641-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.641-0400 m31100| 2015-07-09T14:16:01.601-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.642-0400 m31100| 2015-07-09T14:16:01.603-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.642-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.642-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.643-0400 m31100| 2015-07-09T14:16:01.603-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { 
indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.643-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.643-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.643-0400 m31100| 2015-07-09T14:16:01.603-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.644-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.644-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.644-0400 m31100| 2015-07-09T14:16:01.605-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.648-0400 m30999| 2015-07-09T14:16:01.605-0400 W SHARDING [conn413] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.648-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.648-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.649-0400 m31100| 2015-07-09T14:16:01.606-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.649-0400 m30999| 2015-07-09T14:16:01.606-0400 W SHARDING [conn421] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.650-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.650-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.650-0400 m31100| 2015-07-09T14:16:01.607-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. 
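Note the shape of the splitKeys arrays: the indexed_insert_heterogeneous workload shards on a field whose values span BSON types (numbers, strings, sub-documents, ObjectIds, Dates, and JavaScript functions), so the proposed split points do too, and the shell's multi-line printing of the function bodies is what wraps these entries across several log lines. A hedged sketch of producing such heterogeneous values (field and collection names follow the log; the exact documents are illustrative):

  // Each workload thread inserts values of a different BSON type under
  // the same indexed field. Split-point candidates then mix types and
  // sort by BSON type comparison; in the entries above that puts
  // numbers before strings, then sub-documents, ObjectIds, Dates, and
  // function (Code) values last.
  var coll = db.getSiblingDB("db64").coll64;
  coll.insert({indexed_insert_heterogeneous: 7.0});
  coll.insert({indexed_insert_heterogeneous: "8"});
  coll.insert({indexed_insert_heterogeneous: {tid: 3.0}});
  coll.insert({indexed_insert_heterogeneous: ObjectId('000000000000000000000004')});
  coll.insert({indexed_insert_heterogeneous: new Date(946684805000)});
  coll.insert({indexed_insert_heterogeneous: function() { return 6; }});
  coll.find().sort({indexed_insert_heterogeneous: 1});  // returns documents in shard-key order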
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.651-0400 m30999| 2015-07-09T14:16:01.607-0400 W SHARDING [conn420] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.651-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.651-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.651-0400 m31100| 2015-07-09T14:16:01.615-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.651-0400 m31100| 2015-07-09T14:16:01.615-0400 W SHARDING [conn38] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 3.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.652-0400 m31100| 2015-07-09T14:16:01.616-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.652-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.652-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.652-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.652-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:01.652-0400 m31100| 2015-07-09T14:16:01.616-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.652-0400 m31100| 2015-07-09T14:16:01.617-0400 W SHARDING [conn40] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: "8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.653-0400 m31100| 2015-07-09T14:16:01.617-0400 W SHARDING [conn40] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 3.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.653-0400 m31100| 2015-07-09T14:16:01.618-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.653-0400 m30999| 2015-07-09T14:16:01.618-0400 W SHARDING [conn417] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.653-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.654-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.654-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.654-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.655-0400 m31100| 2015-07-09T14:16:01.619-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: 
ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.655-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.655-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.655-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.655-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.655-0400 m31100| 2015-07-09T14:16:01.621-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.665-0400 m30999| 2015-07-09T14:16:01.621-0400 W SHARDING [conn414] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.665-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.665-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.665-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.705-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.706-0400 m31100| 2015-07-09T14:16:01.628-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:01.628-0400-559eba61792e00bb67274a7f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465761628), what: "multi-split", ns: "db64.coll64", details: { before: { min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey } }, number: 3, of: 3, chunk: { min: { indexed_insert_heterogeneous: new Date(946684819000) }, max: { indexed_insert_heterogeneous: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eba60ca4787b9985d1e96') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.706-0400 m31100| 2015-07-09T14:16:01.630-0400 I SHARDING 
[conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.707-0400 m31100| 2015-07-09T14:16:01.632-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.707-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.707-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.733-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.733-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.733-0400 m31100| 2015-07-09T14:16:01.635-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. 
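Amid the noise, one split did succeed: the "multi-split" metadata event above (number: 3, of: 3, lastmod Timestamp 1000|3) records the winning thread carving [{ : MinKey }, { : MaxKey }) into three chunks. Such events are persisted on the config servers and can be inspected afterwards; a short sketch:

  // Split and migration machinery records metadata events in config.changelog.
  var cfg = db.getSiblingDB("config");
  cfg.changelog.find({what: "multi-split", ns: "db64.coll64"})
     .sort({time: -1})
     .forEach(printjson);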
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.734-0400 m30999| 2015-07-09T14:16:01.635-0400 W SHARDING [conn416] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.734-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.734-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.734-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.734-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.734-0400 m31100| 2015-07-09T14:16:01.644-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.734-0400 m31100| 2015-07-09T14:16:01.646-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.735-0400 m31100| 2015-07-09T14:16:01.646-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.735-0400 m31100| 2015-07-09T14:16:01.648-0400 I SHARDING [conn37] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.735-0400 m31100| 2015-07-09T14:16:01.654-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { 
indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.735-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.736-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.736-0400 m31100| 2015-07-09T14:16:01.655-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.736-0400 m31100| 2015-07-09T14:16:01.656-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.736-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.737-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.737-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.737-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.737-0400 m30999| 2015-07-09T14:16:01.657-0400 W SHARDING [conn413] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function 
anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.737-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.738-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.765-0400 m31100| 2015-07-09T14:16:01.657-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.765-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.765-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.766-0400 m31100| 2015-07-09T14:16:01.658-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.766-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.766-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.766-0400 m31100| 2015-07-09T14:16:01.658-0400 W SHARDING [conn37] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by 
:: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.767-0400 m30999| 2015-07-09T14:16:01.659-0400 W SHARDING [conn418] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.767-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.767-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.767-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.767-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.767-0400 m31100| 2015-07-09T14:16:01.660-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.768-0400 m31100| 2015-07-09T14:16:01.660-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.768-0400 m30999| 2015-07-09T14:16:01.661-0400 W SHARDING [conn419] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.768-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.769-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.770-0400 m30999| 2015-07-09T14:16:01.661-0400 W SHARDING [conn417] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.770-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.770-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.770-0400 m31100| 2015-07-09T14:16:01.669-0400 I SHARDING [conn38] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.771-0400 m31100| 2015-07-09T14:16:01.669-0400 W 
SHARDING [conn38] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 3.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.771-0400 m31100| 2015-07-09T14:16:01.669-0400 W SHARDING [conn38] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 17.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.771-0400 m31100| 2015-07-09T14:16:01.670-0400 W SHARDING [conn38] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.772-0400 m31100| 2015-07-09T14:16:01.670-0400 I SHARDING [conn34] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.772-0400 m31100| 2015-07-09T14:16:01.670-0400 W SHARDING [conn34] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: "8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.772-0400 m31100| 2015-07-09T14:16:01.670-0400 W SHARDING [conn34] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 3.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.772-0400 m31100| 2015-07-09T14:16:01.670-0400 W SHARDING [conn34] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 17.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.772-0400 m31100| 2015-07-09T14:16:01.670-0400 W SHARDING [conn34] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.773-0400 m31100| 2015-07-09T14:16:01.670-0400 I SHARDING [conn37] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.773-0400 m31100| 2015-07-09T14:16:01.671-0400 W SHARDING [conn37] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: "8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.773-0400 m31100| 2015-07-09T14:16:01.671-0400 W SHARDING [conn37] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 3.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.775-0400 m31100| 2015-07-09T14:16:01.671-0400 W SHARDING [conn37] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 17.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.775-0400 m31100| 2015-07-09T14:16:01.671-0400 W SHARDING [conn37] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.776-0400 m30999| 2015-07-09T14:16:01.676-0400 W SHARDING [conn418] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { 
indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.777-0400 m31100| 2015-07-09T14:16:01.672-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.777-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.777-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.778-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.778-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.778-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.778-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.778-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.778-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.779-0400 m30999| 2015-07-09T14:16:01.679-0400 W SHARDING [conn413] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 
}, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m31100| 2015-07-09T14:16:01.672-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.780-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.781-0400 m30999| 2015-07-09T14:16:01.682-0400 W SHARDING [conn420] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { 
indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.782-0400 m31100| 2015-07-09T14:16:01.672-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.782-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.782-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.782-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.782-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.783-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.783-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.784-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.784-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.785-0400 m30999| 2015-07-09T14:16:01.685-0400 W SHARDING 
[conn415] splitChunk failed - cmd: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.785-0400 m31100| 2015-07-09T14:16:01.674-0400 I SHARDING [conn40] request split points lookup for chunk db64.coll64 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.785-0400 m30999| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.785-0400 m31100| 2015-07-09T14:16:01.674-0400 W SHARDING [conn40] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: "15" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.785-0400 m30999| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.785-0400 m31100| 2015-07-09T14:16:01.674-0400 W SHARDING [conn40] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: "8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.786-0400 m30999| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.786-0400 m31100| 2015-07-09T14:16:01.674-0400 W SHARDING [conn40] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 3.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.786-0400 m30999| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.786-0400 m31100| 2015-07-09T14:16:01.675-0400 W SHARDING [conn40] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: { tid: 17.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.786-0400 m31100| 2015-07-09T14:16:01.675-0400 W SHARDING [conn40] possible low cardinality key detected in db64.coll64 - key is { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.786-0400 m31100| 2015-07-09T14:16:01.676-0400 W SHARDING [conn37] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. 
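
The splitKeys vector above is worth a second look: it mixes doubles, strings, sub-documents, ObjectIds, Dates, and even functions, because the indexed_insert_heterogeneous workload derives each thread's indexed value from its thread id (tid) using a different BSON type per group of threads, and the "possible low cardinality key" warnings follow directly from there being only one value per tid. A minimal sketch of that kind of per-tid mapping, reconstructed from the values in the log (helper name and exact grouping are hypothetical, not the workload's actual code):

    // Sketch only: one indexed value per tid, cycling through BSON types.
    // The groups line up with the splitKeys above, e.g. tid 9 -> 9 * 0.0001
    // (note the 0.0009000000000000001 float artifact), tid 8 -> "8",
    // tid 11 -> ObjectId('00000000000000000000000b').
    function valueForTid(tid) {
        switch (tid % 7) {
        case 0:  return tid;                          // 0.0, 7.0, 14.0
        case 1:  return tid.toString();               // "1", "8", "15"
        case 2:  return tid * 0.0001;                 // tiny doubles
        case 3:  return { tid: tid };                 // sub-documents
        case 4:  return ObjectId(('000000000000000000000000' + tid.toString(16)).slice(-24));
        case 5:  return new Date(946684800000 + tid * 1000); // tid secs past 2000-01-01Z
        default: return new Function('return ' + tid + ';'); // prints as "function anonymous() { return 13; }"
        }
    }
    db.coll64.insert({ indexed_insert_heterogeneous: valueForTid(8), tid: 8 });
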
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.787-0400 m31100| 2015-07-09T14:16:01.677-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: 0.0009000000000000001 }, { indexed_insert_heterogeneous: 0.0016 }, { indexed_insert_heterogeneous: 7.0 }, { indexed_insert_heterogeneous: 14.0 }, { indexed_insert_heterogeneous: "1" }, { indexed_insert_heterogeneous: "15" }, { indexed_insert_heterogeneous: "8" }, { indexed_insert_heterogeneous: { tid: 3.0 } }, { indexed_insert_heterogeneous: { tid: 10.0 } }, { indexed_insert_heterogeneous: { tid: 17.0 } }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000004') }, { indexed_insert_heterogeneous: ObjectId('00000000000000000000000b') }, { indexed_insert_heterogeneous: ObjectId('000000000000000000000012') }, { indexed_insert_heterogeneous: new Date(946684805000) }, { indexed_insert_heterogeneous: new Date(946684812000) }, { indexed_insert_heterogeneous: new Date(946684819000) }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.787-0400 m31100| return 13; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.787-0400 m31100| } }, { indexed_insert_heterogeneous: function anonymous() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.787-0400 m31100| return 6; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.788-0400 m31100| } } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.788-0400 m31100| 2015-07-09T14:16:01.678-0400 W SHARDING [conn34] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.788-0400 m31100| 2015-07-09T14:16:01.678-0400 W SHARDING [conn40] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.788-0400 m31100| 2015-07-09T14:16:01.685-0400 W SHARDING [conn38] could not acquire collection lock for db64.coll64 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db64.coll64 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.789-0400 m31100| 2015-07-09T14:16:01.692-0400 I SHARDING [conn15] distributed lock 'db64.coll64/bs-osx108-8:31100:1436464536:197041335' unlocked. 
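
All of these near-identical splitChunk attempts are mongos autosplit racing itself: every shell thread that pushed the chunk over the split threshold asks for a split, one request wins the collection's distributed lock, and the rest fail with code 125 and back off until the next trigger. The same behaviour can be provoked by hand; a sketch, run against a mongos, with the collection name taken from the log:

    // Illustrative: a manual split takes the same per-collection
    // distributed lock, so a concurrent attempt on the same range
    // fails exactly like the { ok: 0.0, ..., code: 125 } results above.
    var res = db.adminCommand({ split: "db64.coll64",
                                middle: { indexed_insert_heterogeneous: 7 } });
    if (res.ok === 0 && res.code === 125) {
        // Lost the race: some other client is already splitting this
        // range. Callers just retry or reload routing info, as mongos does.
        print("split lost the lock race: " + res.errmsg);
    }
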
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.804-0400 m31100| 2015-07-09T14:16:01.692-0400 I COMMAND [conn15] command db64.coll64 command: splitChunk { splitChunk: "db64.coll64", keyPattern: { indexed_insert_heterogeneous: 1.0 }, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_heterogeneous: 0.0 }, { indexed_insert_heterogeneous: new Date(946684819000) } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba60ca4787b9985d1e96') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 15447 } } } protocol:op_command 216ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.804-0400 m30999| 2015-07-09T14:16:01.693-0400 I SHARDING [conn412] ChunkManager: time to load chunks for db64.coll64: 0ms sequenceNumber: 283 version: 1|3||559eba60ca4787b9985d1e96 based on: 1|0||559eba60ca4787b9985d1e96 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.804-0400 m30999| 2015-07-09T14:16:01.693-0400 I SHARDING [conn412] autosplitted db64.coll64 shard: ns: db64.coll64, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { indexed_insert_heterogeneous: MinKey }, max: { indexed_insert_heterogeneous: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.897-0400 m30998| 2015-07-09T14:16:01.896-0400 I NETWORK [conn416] end connection 127.0.0.1:63952 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.914-0400 m30999| 2015-07-09T14:16:01.913-0400 I NETWORK [conn413] end connection 127.0.0.1:63944 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.929-0400 m30999| 2015-07-09T14:16:01.922-0400 I NETWORK [conn414] end connection 127.0.0.1:63945 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.929-0400 m30999| 2015-07-09T14:16:01.923-0400 I NETWORK [conn421] end connection 127.0.0.1:63959 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.960-0400 m30998| 2015-07-09T14:16:01.954-0400 I NETWORK [conn413] end connection 127.0.0.1:63943 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:01.999-0400 m30999| 2015-07-09T14:16:01.998-0400 I NETWORK [conn417] end connection 127.0.0.1:63949 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.040-0400 m30998| 2015-07-09T14:16:02.009-0400 I NETWORK [conn419] end connection 127.0.0.1:63957 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.045-0400 m30998| 2015-07-09T14:16:02.012-0400 I NETWORK [conn415] end connection 127.0.0.1:63951 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.045-0400 m30998| 2015-07-09T14:16:02.022-0400 I NETWORK [conn414] end connection 127.0.0.1:63946 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.045-0400 m30998| 2015-07-09T14:16:02.023-0400 I NETWORK [conn412] end connection 127.0.0.1:63942 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.045-0400 m30998| 2015-07-09T14:16:02.023-0400 I NETWORK [conn417] end connection 127.0.0.1:63954 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.045-0400 m30999| 
2015-07-09T14:16:02.040-0400 I NETWORK [conn420] end connection 127.0.0.1:63955 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.051-0400 m30999| 2015-07-09T14:16:02.051-0400 I NETWORK [conn416] end connection 127.0.0.1:63948 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.057-0400 m30998| 2015-07-09T14:16:02.056-0400 I NETWORK [conn421] end connection 127.0.0.1:63960 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.079-0400 m30999| 2015-07-09T14:16:02.060-0400 I NETWORK [conn419] end connection 127.0.0.1:63953 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.079-0400 m30998| 2015-07-09T14:16:02.072-0400 I NETWORK [conn418] end connection 127.0.0.1:63956 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.079-0400 m30999| 2015-07-09T14:16:02.072-0400 I NETWORK [conn415] end connection 127.0.0.1:63947 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.106-0400 m30999| 2015-07-09T14:16:02.106-0400 I NETWORK [conn418] end connection 127.0.0.1:63950 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.111-0400 m30998| 2015-07-09T14:16:02.111-0400 I NETWORK [conn420] end connection 127.0.0.1:63958 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.143-0400 m30999| 2015-07-09T14:16:02.143-0400 I NETWORK [conn412] end connection 127.0.0.1:63941 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.166-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.166-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.166-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.166-0400 jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js: Workload completed in 1046 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.166-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.166-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.166-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.167-0400 m30999| 2015-07-09T14:16:02.166-0400 I COMMAND [conn1] DROP: db64.coll64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.167-0400 m30999| 2015-07-09T14:16:02.166-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:02.166-0400-559eba62ca4787b9985d1e98", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465762166), what: "dropCollection.start", ns: "db64.coll64", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.223-0400 m30999| 2015-07-09T14:16:02.223-0400 I SHARDING [conn1] distributed lock 'db64.coll64/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba62ca4787b9985d1e99
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.224-0400 m31100| 2015-07-09T14:16:02.223-0400 I COMMAND [conn15] CMD: drop db64.coll64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.227-0400 m31200| 2015-07-09T14:16:02.226-0400 I COMMAND [conn63] CMD: drop db64.coll64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.228-0400 m31101| 2015-07-09T14:16:02.228-0400 I COMMAND [repl writer worker 5] CMD: drop db64.coll64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.229-0400 m31102| 2015-07-09T14:16:02.229-0400 I COMMAND [repl writer worker 6] CMD: drop db64.coll64
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.230-0400 m31201|
2015-07-09T14:16:02.230-0400 I COMMAND [repl writer worker 9] CMD: drop db64.coll64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.233-0400 m31202| 2015-07-09T14:16:02.232-0400 I COMMAND [repl writer worker 10] CMD: drop db64.coll64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.283-0400 m31100| 2015-07-09T14:16:02.282-0400 I SHARDING [conn15] remotely refreshing metadata for db64.coll64 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eba60ca4787b9985d1e96, current metadata version is 1|3||559eba60ca4787b9985d1e96 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.285-0400 m31100| 2015-07-09T14:16:02.284-0400 W SHARDING [conn15] no chunks found when reloading db64.coll64, previous version was 0|0||559eba60ca4787b9985d1e96, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.285-0400 m31100| 2015-07-09T14:16:02.284-0400 I SHARDING [conn15] dropping metadata for db64.coll64 at shard version 1|3||559eba60ca4787b9985d1e96, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.287-0400 m30999| 2015-07-09T14:16:02.286-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:02.286-0400-559eba62ca4787b9985d1e9a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465762286), what: "dropCollection", ns: "db64.coll64", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.341-0400 m30999| 2015-07-09T14:16:02.341-0400 I SHARDING [conn1] distributed lock 'db64.coll64/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.398-0400 m30999| 2015-07-09T14:16:02.398-0400 I COMMAND [conn1] DROP DATABASE: db64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.398-0400 m30999| 2015-07-09T14:16:02.398-0400 I SHARDING [conn1] DBConfig::dropDatabase: db64 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.398-0400 m30999| 2015-07-09T14:16:02.398-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:02.398-0400-559eba62ca4787b9985d1e9b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465762398), what: "dropDatabase.start", ns: "db64", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.505-0400 m30999| 2015-07-09T14:16:02.504-0400 I SHARDING [conn1] DBConfig::dropDatabase: db64 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.505-0400 m31100| 2015-07-09T14:16:02.505-0400 I COMMAND [conn157] dropDatabase db64 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.505-0400 m31100| 2015-07-09T14:16:02.505-0400 I COMMAND [conn157] dropDatabase db64 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.506-0400 m30999| 2015-07-09T14:16:02.506-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:02.506-0400-559eba62ca4787b9985d1e9c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465762506), what: "dropDatabase", ns: "db64", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.507-0400 m31102| 2015-07-09T14:16:02.507-0400 I COMMAND [repl writer worker 5] dropDatabase db64 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.507-0400 m31102| 2015-07-09T14:16:02.507-0400 I COMMAND [repl writer worker 5] dropDatabase db64 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.507-0400 m31101| 2015-07-09T14:16:02.507-0400 I COMMAND 
[repl writer worker 0] dropDatabase db64 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.507-0400 m31101| 2015-07-09T14:16:02.507-0400 I COMMAND [repl writer worker 0] dropDatabase db64 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.596-0400 m31100| 2015-07-09T14:16:02.596-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.600-0400 m31101| 2015-07-09T14:16:02.599-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.600-0400 m31102| 2015-07-09T14:16:02.600-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.630-0400 m31200| 2015-07-09T14:16:02.630-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.633-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.634-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.634-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.634-0400 jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.634-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.634-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.634-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.634-0400 m31201| 2015-07-09T14:16:02.633-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.635-0400 m31202| 2015-07-09T14:16:02.634-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.639-0400 m30999| 2015-07-09T14:16:02.639-0400 I SHARDING [conn1] distributed lock 'db65/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba62ca4787b9985d1e9d
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.644-0400 m30999| 2015-07-09T14:16:02.643-0400 I SHARDING [conn1] Placing [db65] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.644-0400 m30999| 2015-07-09T14:16:02.644-0400 I SHARDING [conn1] Enabling sharding for database [db65] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.698-0400 m30999| 2015-07-09T14:16:02.697-0400 I SHARDING [conn1] distributed lock 'db65/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.735-0400 m31100| 2015-07-09T14:16:02.734-0400 I INDEX [conn70] build index on: db65.coll65 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db65.coll65" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.735-0400 m31100| 2015-07-09T14:16:02.734-0400 I INDEX [conn70] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.740-0400 m31100| 2015-07-09T14:16:02.740-0400 I INDEX [conn70] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.741-0400 m30999| 2015-07-09T14:16:02.741-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db65.coll65", key: { tid: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.745-0400 m31101| 2015-07-09T14:16:02.744-0400 I INDEX [repl writer worker 10] build index on: db65.coll65 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db65.coll65" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.745-0400 m31101| 2015-07-09T14:16:02.744-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.746-0400 m30999| 2015-07-09T14:16:02.744-0400 I SHARDING [conn1] distributed lock 'db65.coll65/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba62ca4787b9985d1e9e [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.746-0400 m30999| 2015-07-09T14:16:02.745-0400 I SHARDING [conn1] enable sharding on: db65.coll65 with shard key: { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.746-0400 m31102| 2015-07-09T14:16:02.745-0400 I INDEX [repl writer worker 2] build index on: db65.coll65 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db65.coll65" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.747-0400 m30999| 2015-07-09T14:16:02.745-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:02.745-0400-559eba62ca4787b9985d1e9f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465762745), what: "shardCollection.start", ns: "db65.coll65", details: { shardKey: { tid: 1.0 }, collection: "db65.coll65", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.747-0400 m31102| 2015-07-09T14:16:02.745-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.749-0400 m31102| 2015-07-09T14:16:02.749-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.749-0400 m31101| 2015-07-09T14:16:02.749-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.799-0400 m30999| 2015-07-09T14:16:02.799-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db65.coll65 using new epoch 559eba62ca4787b9985d1ea0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.852-0400 m30999| 2015-07-09T14:16:02.852-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db65.coll65: 0ms sequenceNumber: 284 version: 1|0||559eba62ca4787b9985d1ea0 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.907-0400 m30999| 2015-07-09T14:16:02.907-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db65.coll65: 0ms sequenceNumber: 285 version: 1|0||559eba62ca4787b9985d1ea0 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.909-0400 m31100| 2015-07-09T14:16:02.909-0400 I SHARDING [conn179] remotely refreshing metadata for db65.coll65 with requested shard version 1|0||559eba62ca4787b9985d1ea0, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.910-0400 m31100| 2015-07-09T14:16:02.910-0400 I SHARDING [conn179] collection db65.coll65 was previously unsharded, new metadata loaded with shard version 1|0||559eba62ca4787b9985d1ea0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.911-0400 m31100| 2015-07-09T14:16:02.910-0400 I SHARDING [conn179] collection version was loaded at version 1|0||559eba62ca4787b9985d1ea0, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.911-0400 m30999| 2015-07-09T14:16:02.911-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:02.911-0400-559eba62ca4787b9985d1ea1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465762911), what: "shardCollection", ns: "db65.coll65", details: { version: "1|0||559eba62ca4787b9985d1ea0" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.965-0400 m30999| 2015-07-09T14:16:02.965-0400 I SHARDING [conn1] distributed lock 'db65.coll65/bs-osx108-8:30999:1436464534:16807' unlocked. 
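
Between the teardown of db64 and the first inserts of the findAndModify_update_grow workload, the harness provisions db65 exactly as the records above show. The same steps, expressed as the equivalent plain shell commands (names taken from the log):

    // Equivalent shell commands for the db65 setup logged above.
    sh.enableSharding("db65");                               // "Enabling sharding for database [db65]"
    db.getSiblingDB("db65").coll65.createIndex({ tid: 1 });  // the "tid_1" index, replicated to 31101/31102
    sh.shardCollection("db65.coll65", { tid: 1 });           // one chunk, new epoch 559eba62ca4787b9985d1ea0

Everything lands on test-rs0 first; the other shard only receives chunks once the balancer or an explicit moveChunk runs.
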
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:02.966-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.119-0400 m30999| 2015-07-09T14:16:03.119-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63961 #422 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.173-0400 m30999| 2015-07-09T14:16:03.173-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63962 #423 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.179-0400 m30998| 2015-07-09T14:16:03.179-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63963 #422 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.187-0400 m30999| 2015-07-09T14:16:03.186-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63964 #424 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.189-0400 m30998| 2015-07-09T14:16:03.189-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63965 #423 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.192-0400 m30998| 2015-07-09T14:16:03.192-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63966 #424 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.193-0400 m30998| 2015-07-09T14:16:03.193-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63968 #425 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.193-0400 m30999| 2015-07-09T14:16:03.193-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63967 #425 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.202-0400 m30998| 2015-07-09T14:16:03.194-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63970 #426 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.202-0400 m30998| 2015-07-09T14:16:03.197-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63971 #427 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.202-0400 m30998| 2015-07-09T14:16:03.197-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63974 #428 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.202-0400 m30998| 2015-07-09T14:16:03.197-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63976 #429 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.203-0400 m30998| 2015-07-09T14:16:03.201-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63977 #430 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.204-0400 m30999| 2015-07-09T14:16:03.203-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63969 #426 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.209-0400 m30999| 2015-07-09T14:16:03.208-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63972 #427 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.209-0400 m30998| 2015-07-09T14:16:03.209-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63980 #431 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.209-0400 m30999| 2015-07-09T14:16:03.209-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63973 #428 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.212-0400 m30999| 
2015-07-09T14:16:03.212-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63975 #429 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.213-0400 m30999| 2015-07-09T14:16:03.212-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63978 #430 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.216-0400 m30999| 2015-07-09T14:16:03.216-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63979 #431 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.221-0400 setting random seed: 3387411641888 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.221-0400 setting random seed: 7687022355385 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.221-0400 setting random seed: 8573903781361 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.222-0400 setting random seed: 9599287332966 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.222-0400 setting random seed: 5784787116572 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.222-0400 setting random seed: 6001148400828 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.223-0400 setting random seed: 4917113501578 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.223-0400 setting random seed: 2227491727098 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.224-0400 setting random seed: 1600920450873 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.228-0400 setting random seed: 550679294392 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.233-0400 m30998| 2015-07-09T14:16:03.232-0400 I SHARDING [conn423] ChunkManager: time to load chunks for db65.coll65: 0ms sequenceNumber: 79 version: 1|0||559eba62ca4787b9985d1ea0 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.236-0400 setting random seed: 875091427005 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.238-0400 setting random seed: 1366454521194 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.240-0400 setting random seed: 3200314571149 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.241-0400 setting random seed: 7453474779613 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.245-0400 setting random seed: 5266905161552 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.246-0400 setting random seed: 7661620532162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.253-0400 setting random seed: 1301219686865 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.267-0400 setting random seed: 1926659513264 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.269-0400 setting random seed: 8856344013474 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.270-0400 setting random seed: 6670030360110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.280-0400 m31100| 2015-07-09T14:16:03.279-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.287-0400 m31100| 2015-07-09T14:16:03.287-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.289-0400 m31100| 2015-07-09T14:16:03.288-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 7.0 }, { tid: 12.0 }, { 
tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.290-0400 m31100| 2015-07-09T14:16:03.290-0400 I SHARDING [conn32] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.291-0400 m31100| 2015-07-09T14:16:03.290-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.292-0400 m31100| 2015-07-09T14:16:03.291-0400 I SHARDING [conn35] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.292-0400 m31100| 2015-07-09T14:16:03.292-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.293-0400 m31100| 2015-07-09T14:16:03.292-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.294-0400 m31100| 2015-07-09T14:16:03.293-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.294-0400 m31100| 2015-07-09T14:16:03.293-0400 I SHARDING [conn39] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.294-0400 m31100| 2015-07-09T14:16:03.293-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.294-0400 m31100| 2015-07-09T14:16:03.293-0400 I SHARDING [conn38] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.295-0400 m31100| 2015-07-09T14:16:03.294-0400 I SHARDING [conn36] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.295-0400 m31100| 2015-07-09T14:16:03.294-0400 I SHARDING [conn40] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.295-0400 m31100| 2015-07-09T14:16:03.294-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } 
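
A pattern worth noting in the records above: each mongos computes its own candidate split points from the shard's statistics at the moment it asks, which is why the concurrent proposals disagree slightly ([1, 5, 8, 12, 16] vs [1, 5, 8, 12, 15] vs [1, 7, 12, 17]). The "request split points lookup" lines correspond to the shard-side splitVector command; a sketch of issuing it directly (the byte threshold here is illustrative, the concurrency suite drives it very low):

    // Run against the shard primary (mongod), not mongos:
    db.getSiblingDB("admin").runCommand({
        splitVector: "db65.coll65",
        keyPattern: { tid: 1 },
        min: { tid: MinKey },
        max: { tid: MaxKey },
        maxChunkSizeBytes: 1024  // illustrative; autosplit passes its own threshold
    });
    // => { splitKeys: [ { tid: ... }, ... ], ok: 1 }, the vector a mongos
    //    then embeds in its splitChunk request to the shard.
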
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.296-0400 m31100| 2015-07-09T14:16:03.294-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.296-0400 m31100| 2015-07-09T14:16:03.295-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.296-0400 m31100| 2015-07-09T14:16:03.295-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.297-0400 m31100| 2015-07-09T14:16:03.295-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.297-0400 m31100| 2015-07-09T14:16:03.296-0400 I SHARDING [conn37] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.299-0400 m31100| 2015-07-09T14:16:03.298-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.299-0400 m31100| 2015-07-09T14:16:03.299-0400 I SHARDING [conn32] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.300-0400 m31100| 2015-07-09T14:16:03.299-0400 I SHARDING [conn32] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.300-0400 m31100| 2015-07-09T14:16:03.299-0400 W SHARDING [conn32] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.300-0400 m30998| 2015-07-09T14:16:03.299-0400 W SHARDING [conn426] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.301-0400 m31100| 2015-07-09T14:16:03.300-0400 I SHARDING [conn39] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.301-0400 m31100| 2015-07-09T14:16:03.300-0400 I SHARDING [conn39] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.301-0400 m31100| 2015-07-09T14:16:03.300-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.302-0400 m31100| 2015-07-09T14:16:03.300-0400 I SHARDING [conn15] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.302-0400 m30998| 2015-07-09T14:16:03.301-0400 W SHARDING [conn428] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.304-0400 m30999| 2015-07-09T14:16:03.301-0400 W SHARDING [conn424] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.304-0400 m31100| 2015-07-09T14:16:03.300-0400 I SHARDING [conn34] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.305-0400 m30998| 2015-07-09T14:16:03.302-0400 W SHARDING [conn424] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } 
result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.306-0400 m30999| 2015-07-09T14:16:03.301-0400 W SHARDING [conn422] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.306-0400 m31100| 2015-07-09T14:16:03.300-0400 I SHARDING [conn37] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.307-0400 m30998| 2015-07-09T14:16:03.303-0400 W SHARDING [conn423] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.307-0400 m30999| 2015-07-09T14:16:03.302-0400 W SHARDING [conn430] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.308-0400 m31100| 2015-07-09T14:16:03.300-0400 I SHARDING [conn15] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.308-0400 m30999| 2015-07-09T14:16:03.303-0400 W SHARDING [conn431] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.308-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn34] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. 
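
The "another update won" lines describe the distributed lock implementation quite literally: the lock is a document on the config servers that contenders try to claim with a compare-and-set style update, and everyone whose update did not stick reports the lock as taken. While a split is in flight it can be inspected from the shell (illustrative):

    // Illustrative look at the collection lock being contended above.
    var cfg = db.getSiblingDB("config");
    cfg.locks.find({ _id: "db65.coll65" }).pretty();
    // A held lock shows state: 2, a "ts" matching the ts in the
    // "distributed lock ... acquired, ts : ..." line, and a "why" like
    // "splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65",
    // which is the text surfaced in the errmsg fields above.
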
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.309-0400 m30998| 2015-07-09T14:16:03.305-0400 W SHARDING [conn430] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.310-0400 m30999| 2015-07-09T14:16:03.303-0400 W SHARDING [conn425] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.310-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn39] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.310-0400 m30999| 2015-07-09T14:16:03.307-0400 W SHARDING [conn429] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.311-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn37] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.311-0400 m31100| 2015-07-09T14:16:03.301-0400 W SHARDING [conn15] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.311-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn38] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.311-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn35] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.312-0400 m31100| 2015-07-09T14:16:03.301-0400 W SHARDING [conn34] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.312-0400 m31100| 2015-07-09T14:16:03.301-0400 W SHARDING [conn37] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.312-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn38] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.315-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn35] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.315-0400 m31100| 2015-07-09T14:16:03.301-0400 I SHARDING [conn40] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.315-0400 m31100| 2015-07-09T14:16:03.301-0400 W SHARDING [conn38] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.316-0400 m31100| 2015-07-09T14:16:03.302-0400 W SHARDING [conn35] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.316-0400 m31100| 2015-07-09T14:16:03.302-0400 I SHARDING [conn40] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.316-0400 m31100| 2015-07-09T14:16:03.302-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 12.0 }, { tid: 15.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.316-0400 m31100| 2015-07-09T14:16:03.302-0400 W SHARDING [conn40] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.316-0400 m31100| 2015-07-09T14:16:03.302-0400 I SHARDING [conn36] could not acquire lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.317-0400 m31100| 2015-07-09T14:16:03.302-0400 I SHARDING [conn36] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.317-0400 m31100| 2015-07-09T14:16:03.302-0400 W SHARDING [conn36] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.317-0400 m31100| 2015-07-09T14:16:03.303-0400 I SHARDING [conn38] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.317-0400 m31100| 2015-07-09T14:16:03.304-0400 I SHARDING [conn40] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.317-0400 m31100| 2015-07-09T14:16:03.304-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.318-0400 m31100| 2015-07-09T14:16:03.304-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.319-0400 m31100| 2015-07-09T14:16:03.306-0400 W SHARDING [conn38] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.320-0400 m31100| 2015-07-09T14:16:03.309-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.320-0400 m31100| 2015-07-09T14:16:03.310-0400 I SHARDING [conn132] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba63792e00bb67274a81 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.321-0400 m31100| 2015-07-09T14:16:03.310-0400 I SHARDING [conn132] remotely refreshing metadata for db65.coll65 based on current shard version 1|0||559eba62ca4787b9985d1ea0, current metadata version is 1|0||559eba62ca4787b9985d1ea0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.321-0400 m31100| 2015-07-09T14:16:03.312-0400 W SHARDING [conn40] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.322-0400 m31100| 2015-07-09T14:16:03.312-0400 I SHARDING [conn38] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.322-0400 m30999| 2015-07-09T14:16:03.312-0400 W SHARDING [conn426] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.322-0400 m31100| 2015-07-09T14:16:03.320-0400 I SHARDING [conn132] metadata of collection db65.coll65 already up to date (shard version : 1|0||559eba62ca4787b9985d1ea0, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.322-0400 m31100| 2015-07-09T14:16:03.320-0400 I SHARDING [conn132] splitChunk accepted at version 1|0||559eba62ca4787b9985d1ea0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.323-0400 m31100| 2015-07-09T14:16:03.320-0400 I SHARDING [conn37] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.323-0400 m31100| 2015-07-09T14:16:03.320-0400 I SHARDING [conn39] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.323-0400 m31100| 2015-07-09T14:16:03.321-0400 I SHARDING [conn40] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.323-0400 m31100| 2015-07-09T14:16:03.321-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.323-0400 m31100| 2015-07-09T14:16:03.321-0400 I SHARDING [conn36] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.327-0400 m31100| 2015-07-09T14:16:03.324-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.328-0400 m31100| 2015-07-09T14:16:03.324-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.329-0400 m31100| 2015-07-09T14:16:03.325-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.331-0400 m31100| 2015-07-09T14:16:03.325-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.334-0400 m31100| 2015-07-09T14:16:03.326-0400 I SHARDING [conn35] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.334-0400 m31100| 2015-07-09T14:16:03.326-0400 I SHARDING [conn32] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.334-0400 m31100| 2015-07-09T14:16:03.327-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.335-0400 m31100| 2015-07-09T14:16:03.327-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.335-0400 m31100| 2015-07-09T14:16:03.328-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.335-0400 m31100| 2015-07-09T14:16:03.329-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.335-0400 m31100| 2015-07-09T14:16:03.329-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.335-0400 m31100| 2015-07-09T14:16:03.330-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.346-0400 m31100| 2015-07-09T14:16:03.340-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:03.340-0400-559eba63792e00bb67274a8b", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465763340), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 5, chunk: { min: { tid: MinKey }, max: { tid: 1.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.348-0400 m30999| 2015-07-09T14:16:03.342-0400 I SHARDING [conn431] ChunkManager: time to load chunks for db65.coll65: 1ms sequenceNumber: 286 version: 1|5||559eba62ca4787b9985d1ea0 based on: 1|0||559eba62ca4787b9985d1ea0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.348-0400 m30998| 2015-07-09T14:16:03.342-0400 I SHARDING [conn429] ChunkManager: time to load chunks for db65.coll65: 0ms sequenceNumber: 80 version: 1|5||559eba62ca4787b9985d1ea0 based on: 1|0||559eba62ca4787b9985d1ea0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.348-0400 m31100| 2015-07-09T14:16:03.343-0400 W SHARDING [conn35] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.349-0400 m29000| 2015-07-09T14:16:03.343-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:63981 #73 (73 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.349-0400 m31100| 2015-07-09T14:16:03.343-0400 W SHARDING [conn37] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
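The "multi-split" event above is the first of five: the accepted split points { tid: 1 }, { tid: 7 }, { tid: 12 }, { tid: 17 } (visible in the winning splitChunk command further down) cut the original chunk [{ tid: MinKey }, { tid: MaxKey }) into five ranges, with one metadata event per resulting chunk and minor versions 1|1 through 1|5. A small illustrative shell snippet, not from the test, that derives those ranges from the split points:

// Illustration (not from the test): the five chunk ranges recorded by
// the "multi-split" events. MinKey/MaxKey are mongo shell globals.
var points = [MinKey, 1, 7, 12, 17, MaxKey];
for (var i = 0; i + 1 < points.length; i++) {
    print("chunk " + (i + 1) + " of 5: { tid: " + tojson(points[i]) +
          " } -->> { tid: " + tojson(points[i + 1]) + " }");
}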
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.351-0400 m30999| 2015-07-09T14:16:03.343-0400 W SHARDING [conn424] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.351-0400 m30998| 2015-07-09T14:16:03.343-0400 W SHARDING [conn424] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.352-0400 m31100| 2015-07-09T14:16:03.344-0400 W SHARDING [conn36] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.352-0400 m31100| 2015-07-09T14:16:03.345-0400 W SHARDING [conn32] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.353-0400 m31100| 2015-07-09T14:16:03.345-0400 W SHARDING [conn38] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.353-0400 m31100| 2015-07-09T14:16:03.345-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.356-0400 m30999| 2015-07-09T14:16:03.345-0400 W SHARDING [conn428] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.357-0400 m31100| 2015-07-09T14:16:03.345-0400 W SHARDING [conn40] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.358-0400 m30998| 2015-07-09T14:16:03.345-0400 W SHARDING [conn427] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.358-0400 m31100| 2015-07-09T14:16:03.345-0400 W SHARDING [conn34] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
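Every worker that loses the lock race surfaces the same code-125 "splitChunk failed" warning through its mongos; this is expected lock contention from concurrent autosplit attempts, not an error in the data, and exactly one split proceeds. One way to confirm from the shell that a single five-way split actually landed is to read the chunk metadata off the config servers. A hedged sketch, not from the test, assuming a shell connected to a mongos and the era's metadata schema in which config.chunks documents carry an ns field:

// Sketch (not from the test): list the chunks of db65.coll65 from the
// config metadata to confirm a single five-way split took effect.
var chunks = db.getSiblingDB("config").chunks
                 .find({ ns: "db65.coll65" }).sort({ min: 1 }).toArray();
chunks.forEach(function(c) {
    print(tojson(c.min) + " -->> " + tojson(c.max) + "  lastmod: " + tojson(c.lastmod));
});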
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.359-0400 m30999| 2015-07-09T14:16:03.346-0400 W SHARDING [conn422] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.360-0400 m30998| 2015-07-09T14:16:03.345-0400 W SHARDING [conn426] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.360-0400 m30999| 2015-07-09T14:16:03.346-0400 W SHARDING [conn429] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.361-0400 m30998| 2015-07-09T14:16:03.345-0400 W SHARDING [conn423] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.362-0400 m31100| 2015-07-09T14:16:03.352-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.363-0400 m31100| 2015-07-09T14:16:03.352-0400 W SHARDING [conn34] possible low cardinality key detected in db65.coll65 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.364-0400 m31100| 2015-07-09T14:16:03.353-0400 I SHARDING [conn39] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.380-0400 m31100| 2015-07-09T14:16:03.353-0400 W SHARDING [conn39] possible low cardinality key detected in db65.coll65 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.380-0400 m31100| 2015-07-09T14:16:03.354-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 }, { tid: 10.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 13.0 }, { tid: 14.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.381-0400 m31100| 2015-07-09T14:16:03.354-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 }, { tid: 10.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 13.0 }, { tid: 14.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.383-0400 m31100| 2015-07-09T14:16:03.354-0400 W SHARDING [conn15] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.383-0400 m30999| 2015-07-09T14:16:03.354-0400 W SHARDING [conn427] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 7.0 }, { tid: 11.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.383-0400 m31100| 2015-07-09T14:16:03.355-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
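The "possible low cardinality key detected" warnings above, together with the 19-point split request at every integer tid, follow from the workload's shard key: { tid: 1 } takes one value per FSM worker thread (tids 0 through 19 here), so once a chunk holds a single tid there is no interior point left to split at. An illustrative cardinality check from the shell, not part of the test:

// Sketch (not from the test): shard-key cardinality for db65.coll65.
// With only ~20 distinct tid values, chunks can never get finer than
// one tid per chunk, which is what triggers the warning above.
var tids = db.getSiblingDB("db65").coll65.distinct("tid");
print("distinct tid values: " + tids.length);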
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.384-0400 m30998| 2015-07-09T14:16:03.355-0400 W SHARDING [conn429] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 }, { tid: 10.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 13.0 }, { tid: 14.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.384-0400 m31100| 2015-07-09T14:16:03.357-0400 W SHARDING [conn34] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.384-0400 m30999| 2015-07-09T14:16:03.357-0400 W SHARDING [conn426] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 }, { tid: 10.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 13.0 }, { tid: 14.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.444-0400 m31100| 2015-07-09T14:16:03.395-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:03.395-0400-559eba63792e00bb67274a8c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465763395), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 5, chunk: { min: { tid: 1.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.460-0400 m31100| 2015-07-09T14:16:03.457-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:03.457-0400-559eba63792e00bb67274a8d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465763457), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 5, chunk: { min: { tid: 7.0 }, max: { tid: 12.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.512-0400 m31100| 2015-07-09T14:16:03.510-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:03.510-0400-559eba63792e00bb67274a8e", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465763510), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: MinKey }, max: { tid: 
MaxKey } }, number: 4, of: 5, chunk: { min: { tid: 12.0 }, max: { tid: 17.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.564-0400 m31100| 2015-07-09T14:16:03.563-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:03.563-0400-559eba63792e00bb67274a8f", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465763563), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 5, chunk: { min: { tid: 17.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.635-0400 m31100| 2015-07-09T14:16:03.633-0400 I SHARDING [conn132] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.636-0400 m31100| 2015-07-09T14:16:03.633-0400 I COMMAND [conn132] command db65.coll65 command: splitChunk { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 7.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 7713 } } } protocol:op_command 345ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.642-0400 m30998| 2015-07-09T14:16:03.638-0400 I SHARDING [conn425] autosplitted db65.coll65 shard: ns: db65.coll65, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 5 (splitThreshold 921) (migrate suggested, but no migrations allowed) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.689-0400 m31100| 2015-07-09T14:16:03.686-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.775-0400 m31100| 2015-07-09T14:16:03.772-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.832-0400 m31100| 2015-07-09T14:16:03.829-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.853-0400 m31100| 2015-07-09T14:16:03.849-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 160ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.902-0400 m31100| 2015-07-09T14:16:03.898-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:155739 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 242ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.920-0400 m31100| 2015-07-09T14:16:03.919-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 213ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.923-0400 m31100| 2015-07-09T14:16:03.922-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 211ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.927-0400 m31100| 2015-07-09T14:16:03.925-0400 I COMMAND [conn175] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 288ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:03.957-0400 m31100| 2015-07-09T14:16:03.955-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 285ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.046-0400 m31100| 2015-07-09T14:16:04.045-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:9 reslen:4045602 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, oplog: { acquireCount: { r: 6 } } } 374ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.138-0400 m31100| 2015-07-09T14:16:04.136-0400 I COMMAND [conn73] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 9.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 309ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.142-0400 m31100| 2015-07-09T14:16:04.142-0400 I QUERY [conn58] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f371') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.168-0400 m31100| 2015-07-09T14:16:04.166-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.173-0400 m31100| 2015-07-09T14:16:04.172-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 439ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.206-0400 m31100| 2015-07-09T14:16:04.203-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:11 reslen:3270238 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, oplog: { acquireCount: { r: 4 } } } 386ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.236-0400 m31100| 2015-07-09T14:16:04.234-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:155739 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 283ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.255-0400 m31100| 2015-07-09T14:16:04.254-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.292-0400 m31100| 2015-07-09T14:16:04.292-0400 I QUERY [conn58] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.313-0400 m31100| 2015-07-09T14:16:04.308-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 328ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.339-0400 m31100| 2015-07-09T14:16:04.338-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.343-0400 m31100| 2015-07-09T14:16:04.342-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 526ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.346-0400 m31100| 2015-07-09T14:16:04.344-0400 I QUERY [conn73] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36e') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253641818701 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.373-0400 m31100| 2015-07-09T14:16:04.372-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 544ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.380-0400 m31100| 2015-07-09T14:16:04.379-0400 I COMMAND [conn181] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 629ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.393-0400 m31100| 2015-07-09T14:16:04.392-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 418ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.400-0400 m31100| 2015-07-09T14:16:04.399-0400 I COMMAND [conn180] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 568ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.402-0400 m31100| 2015-07-09T14:16:04.402-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.403-0400 m31100| 2015-07-09T14:16:04.402-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.433-0400 m31100| 2015-07-09T14:16:04.433-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.440-0400 m31100| 2015-07-09T14:16:04.438-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.470-0400 m31100| 2015-07-09T14:16:04.469-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 10.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 507ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.537-0400 m31100| 2015-07-09T14:16:04.536-0400 I QUERY [conn73] query db65.coll65 query: { query: { tid: 9.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.545-0400 m31100| 2015-07-09T14:16:04.543-0400 I QUERY [conn179] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f372') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253802732209 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 111ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.553-0400 m31100| 2015-07-09T14:16:04.553-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:11 reslen:4358238 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 480ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.570-0400 m31100| 2015-07-09T14:16:04.569-0400 I QUERY [conn186] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36c') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254482315760 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.578-0400 m31100| 2015-07-09T14:16:04.577-0400 I QUERY [conn45] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f373') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254462128980 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 105ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.581-0400 m30999| 2015-07-09T14:16:04.581-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:16:04.575-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.595-0400 m31100| 2015-07-09T14:16:04.594-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:9 reslen:5133602 locks:{ Global: { acquireCount: 
{ r: 8 } }, Database: { acquireCount: { r: 4 } }, oplog: { acquireCount: { r: 4 } } } 362ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:04.607-0400 m31100| 2015-07-09T14:16:04.606-0400 I QUERY [conn180] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36b') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253236187266 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.152-0400 m31100| 2015-07-09T14:16:05.152-0400 I QUERY [conn191] query db65.coll65 query: { query: { tid: 10.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 572ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.407-0400 m31100| 2015-07-09T14:16:05.406-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399858 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 798ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.410-0400 m31100| 2015-07-09T14:16:05.410-0400 I QUERY [conn180] query db65.coll65 query: { query: { tid: 14.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 777ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.536-0400 m31100| 2015-07-09T14:16:05.535-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1254ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.553-0400 m31100| 2015-07-09T14:16:05.550-0400 I QUERY [conn179] query db65.coll65 query: { query: { tid: 2.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 989ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.662-0400 m31100| 2015-07-09T14:16:05.662-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 4.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 1069ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.749-0400 m31100| 2015-07-09T14:16:05.748-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:5 nreturned:6 reslen:4274888 locks:{ Global: { acquireCount: { r: 12 } }, Database: { acquireCount: { r: 6 } }, oplog: { acquireCount: { r: 6 } } } 346ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.764-0400 m31100| 2015-07-09T14:16:05.763-0400 I QUERY [conn186] query db65.coll65 query: { query: { tid: 3.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 1182ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.771-0400 m31100| 2015-07-09T14:16:05.770-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1680ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.882-0400 m31100| 2015-07-09T14:16:05.879-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1868ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.888-0400 m31100| 2015-07-09T14:16:05.886-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1978ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.890-0400 m31100| 2015-07-09T14:16:05.890-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.929-0400 m31100| 2015-07-09T14:16:05.927-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1967ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.932-0400 m31100| 2015-07-09T14:16:05.931-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 12.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2139ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.933-0400 m31100| 2015-07-09T14:16:05.933-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:3 nreturned:6 reslen:4274888 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, oplog: { acquireCount: { r: 4 } } } 460ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.959-0400 m31100| 2015-07-09T14:16:05.956-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1888ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:05.978-0400 m31100| 2015-07-09T14:16:05.977-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1399899 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1919ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.015-0400 m30998| 2015-07-09T14:16:05.992-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:16:05.983-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.111-0400 m31100| 2015-07-09T14:16:06.111-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.124-0400 m31100| 2015-07-09T14:16:06.124-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.157-0400 m31100| 2015-07-09T14:16:06.153-0400 I QUERY [conn49] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f379') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254883745721 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 174ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.164-0400 m31100| 2015-07-09T14:16:06.163-0400 I QUERY [conn182] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f370') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254669507439 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 211ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.195-0400 m31100| 2015-07-09T14:16:06.191-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.200-0400 m31100| 2015-07-09T14:16:06.199-0400 I QUERY [conn183] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f374') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253304563086 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.264-0400 m31100| 2015-07-09T14:16:06.263-0400 I QUERY [conn177] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f369') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 639ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.265-0400 m31100| 2015-07-09T14:16:06.264-0400 I QUERY [conn50] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f376') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5252932862935 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 162ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.272-0400 m31100| 2015-07-09T14:16:06.267-0400 I COMMAND [conn175] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:4199259 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2036ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.277-0400 m31100| 2015-07-09T14:16:06.271-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1977ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.297-0400 m31100| 2015-07-09T14:16:06.296-0400 I QUERY [conn176] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36f') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254340825695 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 127ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.392-0400 m31100| 2015-07-09T14:16:06.385-0400 I QUERY [conn188] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f375') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254651865134 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 149ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.798-0400 m31100| 2015-07-09T14:16:06.390-0400 I QUERY [conn184] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f367') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254870389808 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 222ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.798-0400 m31100| 2015-07-09T14:16:06.395-0400 I QUERY [conn49] query db65.coll65 query: { query: { tid: 19.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 199ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.799-0400 m31100| 2015-07-09T14:16:06.433-0400 I QUERY [conn182] query db65.coll65 query: { query: { tid: 16.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 246ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.800-0400 m31100| 2015-07-09T14:16:06.460-0400 I QUERY [conn177] query db65.coll65 query: { query: { tid: 13.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 186ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.800-0400 m31100| 2015-07-09T14:16:06.518-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.800-0400 m31100| 2015-07-09T14:16:06.529-0400 I QUERY [conn188] query db65.coll65 query: { query: { tid: 18.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 129ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.800-0400 m31100| 2015-07-09T14:16:06.596-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399858 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 723ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.801-0400 m31100| 2015-07-09T14:16:06.622-0400 I QUERY [conn184] query db65.coll65 query: { query: { tid: 12.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 191ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.801-0400 m31100| 2015-07-09T14:16:06.664-0400 I QUERY [conn183] query db65.coll65 query: { query: { tid: 6.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 450ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.801-0400 m31100| 2015-07-09T14:16:06.685-0400 I QUERY [conn50] query db65.coll65 query: { query: { tid: 15.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 204ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.802-0400 m31100| 2015-07-09T14:16:06.765-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2427ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.802-0400 m31100| 2015-07-09T14:16:06.792-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.803-0400 m31100| 2015-07-09T14:16:06.796-0400 I QUERY [conn178] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f377') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253032014327 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 175ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.803-0400 m31100| 2015-07-09T14:16:06.799-0400 I QUERY [conn176] query db65.coll65 query: { query: { tid: 5.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 229ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.825-0400 m31100| 2015-07-09T14:16:06.821-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:4 nreturned:5 reslen:4821930 locks:{ Global: { acquireCount: { r: 10 } }, Database: { acquireCount: { r: 5 } }, oplog: { acquireCount: { r: 5 } } } 768ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.900-0400 m31100| 2015-07-09T14:16:06.900-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:16:06.898-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:06.966-0400 m31100| 2015-07-09T14:16:06.965-0400 I QUERY [conn178] query db65.coll65 query: { query: { tid: 7.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 139ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.064-0400 m31100| 2015-07-09T14:16:07.062-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:5 reslen:4821930 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 339ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.073-0400 m31100| 2015-07-09T14:16:07.072-0400 I QUERY [conn176] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f371') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253872899361 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 237ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.160-0400 m31100| 2015-07-09T14:16:07.158-0400 I QUERY [conn183] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f368') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254007293474 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 340ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.313-0400 m31200| 2015-07-09T14:16:07.313-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:16:07.311-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.358-0400 m31100| 2015-07-09T14:16:07.356-0400 I QUERY [conn176] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 253ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.465-0400 m31100| 2015-07-09T14:16:07.460-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 902ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.568-0400 m31100| 2015-07-09T14:16:07.567-0400 I QUERY [conn183] query db65.coll65 query: { query: { tid: 8.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 231ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.597-0400 m31100| 2015-07-09T14:16:07.595-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:4199259 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1849ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.710-0400 m31100| 2015-07-09T14:16:07.706-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:3 reslen:4199534 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 421ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.712-0400 m31100| 2015-07-09T14:16:07.711-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:3 reslen:4199534 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 752ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.712-0400 m31100| 2015-07-09T14:16:07.712-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.788-0400 m31100| 2015-07-09T14:16:07.785-0400 I QUERY [conn176] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f369') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254790184563 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 241ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.827-0400 m31100| 2015-07-09T14:16:07.825-0400 I COMMAND [conn180] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 10.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:4199259 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2004ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:07.863-0400 m31100| 2015-07-09T14:16:07.862-0400 I COMMAND [conn73] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 9.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2262ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.039-0400 m31100| 2015-07-09T14:16:08.029-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2453ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.058-0400 m31100| 2015-07-09T14:16:08.057-0400 I QUERY [conn176] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36d') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254032174193 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 202ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.067-0400 m31100| 2015-07-09T14:16:08.066-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.076-0400 m31100| 2015-07-09T14:16:08.067-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.077-0400 m31100| 2015-07-09T14:16:08.076-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:379 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 507ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.081-0400 m31100| 2015-07-09T14:16:08.079-0400 I QUERY [conn185] query db65.coll65 query: { query: { tid: 13.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 149ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.082-0400 m31100| 2015-07-09T14:16:08.081-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.084-0400 m31100| 2015-07-09T14:16:08.083-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.084-0400 m31100| 2015-07-09T14:16:08.083-0400 I SHARDING [conn15] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba68792e00bb67274a90
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.085-0400 m31100| 2015-07-09T14:16:08.083-0400 I SHARDING [conn15] remotely refreshing metadata for db65.coll65 based on current shard version 1|5||559eba62ca4787b9985d1ea0, current metadata version is 1|5||559eba62ca4787b9985d1ea0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.089-0400 m31100| 2015-07-09T14:16:08.088-0400 W SHARDING [conn34] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken.
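
Annotation: every findAndModify entry above has the same shape. An FSM worker thread selects its own document by tid, takes the smallest match by length, rewrites a padding string, and multiplies length (3.0 in most entries; the conn183 entry for tid 8.0 uses 160.0 and returns a tiny reslen:379, i.e. it matched a freshly shrunken document). The reslen values stepping from ~466KB through ~1.4MB to ~4.2MB are the grown documents coming back because the command runs with new: true, and they roughly triple per pass, matching the 3.0 multiplier. A minimal shell sketch of that pattern; the field name and operators are taken verbatim from the log, while the rebuild-the-string-at-the-new-length mechanism is an assumption (the logged string is truncated):

    // Hedged sketch of the logged grow step, not the test's actual source.
    var coll = db.getSiblingDB("db65").coll65;
    var doc = coll.find({ tid: 15.0 }).sort({ length: 1.0 }).limit(1).next();
    var padding = new Array(Math.floor(doc.length * 3.0) + 1).join("x");
    coll.findAndModify({
        query: { tid: 15.0 },        // each worker thread owns one tid
        sort: { length: 1.0 },       // smallest matching document first
        update: {
            $set: { findAndModify_update_grow: padding },
            $mul: { length: 3.0 }    // grow the length field geometrically
        },
        new: true                    // return the grown doc, hence the multi-MB reslen
    });
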
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.092-0400 m30999| 2015-07-09T14:16:08.089-0400 W SHARDING [conn423] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.160-0400 m31100| 2015-07-09T14:16:08.158-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:3 reslen:4199534 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.168-0400 m31100| 2015-07-09T14:16:08.167-0400 I COMMAND [conn181] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2623ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.179-0400 m31100| 2015-07-09T14:16:08.175-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:3 reslen:4199534 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 302ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:08.432-0400 m31100| 2015-07-09T14:16:08.431-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:4 reslen:7465612 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:09.182-0400 m31100| 2015-07-09T14:16:09.181-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:3 reslen:6998894 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 672ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:09.354-0400 m31100| 2015-07-09T14:16:09.352-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 
writeConflicts:0 numYields:3 nreturned:4 reslen:7465612 locks:{ Global: { acquireCount: { r: 8 } }, Database: { acquireCount: { r: 4 } }, oplog: { acquireCount: { r: 4 } } } 1113ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:09.462-0400 m31100| 2015-07-09T14:16:09.456-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 12.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2192ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:09.697-0400 m31100| 2015-07-09T14:16:09.696-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 370ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:09.738-0400 m31100| 2015-07-09T14:16:09.736-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1960ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.057-0400 m31100| 2015-07-09T14:16:10.055-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3955ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.084-0400 m31100| 2015-07-09T14:16:10.076-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 2700ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.217-0400 m31100| 2015-07-09T14:16:10.216-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:4199259 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3180ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.224-0400 m31100| 2015-07-09T14:16:10.222-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 4054ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.414-0400 m31100| 2015-07-09T14:16:10.413-0400 I COMMAND [conn175] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:4199259 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3178ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.510-0400 m31100| 2015-07-09T14:16:10.509-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 4727ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.517-0400 m31100| 2015-07-09T14:16:10.516-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3150ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.573-0400 m31100| 2015-07-09T14:16:10.572-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3483ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.576-0400 m31100| 2015-07-09T14:16:10.575-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 4317ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.596-0400 m31100| 2015-07-09T14:16:10.595-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:4199259 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3287ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.771-0400 m31100| 2015-07-09T14:16:10.769-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.772-0400 m31100| 2015-07-09T14:16:10.769-0400 I SHARDING [conn15] metadata of collection db65.coll65 already up to date (shard version : 1|5||559eba62ca4787b9985d1ea0, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.772-0400 m31100| 2015-07-09T14:16:10.769-0400 I SHARDING [conn15] splitChunk accepted at version 1|5||559eba62ca4787b9985d1ea0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.773-0400 m31100| 2015-07-09T14:16:10.769-0400 I COMMAND [conn132] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 10236122 } } } protocol:op_command 2678ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.773-0400 m31100| 2015-07-09T14:16:10.769-0400 I SHARDING [conn39] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.773-0400 m31100| 2015-07-09T14:16:10.770-0400 I SHARDING [conn37] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.774-0400 m31100| 2015-07-09T14:16:10.770-0400 I COMMAND [conn39] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 10033858 } } } protocol:op_command 2522ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.774-0400 m31100| 2015-07-09T14:16:10.770-0400 I SHARDING [conn36] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.775-0400 m31100| 2015-07-09T14:16:10.770-0400 I COMMAND [conn37] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 485797 } } } protocol:op_command 486ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.775-0400 m31100| 2015-07-09T14:16:10.771-0400 I SHARDING [conn40] request 
split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.776-0400 m31100| 2015-07-09T14:16:10.773-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 439422 } } } 441ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.777-0400 m31100| 2015-07-09T14:16:10.773-0400 I SHARDING [conn35] request split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.777-0400 m31100| 2015-07-09T14:16:10.773-0400 I COMMAND [conn36] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1318483 } } } protocol:op_command 821ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.778-0400 m31100| 2015-07-09T14:16:10.774-0400 I COMMAND [conn40] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:156 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 2697079 } } } protocol:op_command 1198ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.779-0400 m31100| 2015-07-09T14:16:10.778-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : MinKey } -->> { : 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.779-0400 m31100| 2015-07-09T14:16:10.778-0400 I SHARDING [conn38] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.780-0400 m31100| 2015-07-09T14:16:10.778-0400 I SHARDING [conn32] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.780-0400 m31100| 2015-07-09T14:16:10.778-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 21609 } } } 26ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.782-0400 m31100| 2015-07-09T14:16:10.779-0400 I COMMAND [conn35] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:156 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 218743 } } } protocol:op_command 227ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:10.782-0400 m31100| 2015-07-09T14:16:10.779-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.783-0400 m31100| 2015-07-09T14:16:10.779-0400 I COMMAND [conn34] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: 1.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:114 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 10185933 } } } protocol:op_command 2680ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.783-0400 m31100| 2015-07-09T14:16:10.781-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.784-0400 m31100| 2015-07-09T14:16:10.781-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.786-0400 m31100| 2015-07-09T14:16:10.781-0400 I COMMAND [conn38] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1031271 } } } protocol:op_command 537ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.786-0400 m31100| 2015-07-09T14:16:10.782-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.787-0400 m31100| 2015-07-09T14:16:10.783-0400 I COMMAND [conn32] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:135 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 349090 } } } protocol:op_command 358ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.787-0400 
m31100| 2015-07-09T14:16:10.784-0400 W SHARDING [conn132] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.788-0400 m31100| 2015-07-09T14:16:10.785-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.788-0400 m31100| 2015-07-09T14:16:10.785-0400 W SHARDING [conn37] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.792-0400 m30998| 2015-07-09T14:16:10.788-0400 W SHARDING [conn426] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.794-0400 m31100| 2015-07-09T14:16:10.787-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.794-0400 m31100| 2015-07-09T14:16:10.794-0400 W SHARDING [conn36] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
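
The burst of "could not acquire collection lock" warnings above is several routers racing to auto-split db65.coll65 at once: the first splitChunk to arrive takes the distributed collection lock on the config servers, and every concurrent attempt fails with code 125 and is simply retried later. A minimal shell sketch for inspecting the lock the losing attempts ran into, assuming the 3.x-era config.locks schema (run through a mongos or against the config server; not part of the test itself):

    // Sketch: look up the distributed lock named in the warnings above.
    // Assumes 3.x config.locks documents are keyed by namespace; the
    // "why" field should read "splitting chunk ... in db65.coll65".
    var configDB = db.getSiblingDB('config');
    configDB.locks.find({ _id: 'db65.coll65' }).forEach(printjson);
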
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.797-0400 m30999| 2015-07-09T14:16:10.795-0400 W SHARDING [conn429] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.798-0400 m31100| 2015-07-09T14:16:10.797-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.809-0400 m30998| 2015-07-09T14:16:10.798-0400 W SHARDING [conn423] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.810-0400 m31100| 2015-07-09T14:16:10.804-0400 W SHARDING [conn40] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.812-0400 m31100| 2015-07-09T14:16:10.811-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.819-0400 m31100| 2015-07-09T14:16:10.812-0400 W SHARDING [conn35] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
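
Each splitChunk attempt above is driven by a preceding splitVector command: the router asks the shard primary to scan the chunk's key range and propose split keys that keep each piece under maxChunkSizeBytes (13107200 bytes here). A sketch of issuing the same command by hand, with every parameter copied from the log entries above; splitVector is an internal command and is assumed here to be run on the admin database of the shard primary (m31100):

    // Sketch: replay one of the logged splitVector calls directly.
    var res = db.getSiblingDB('admin').runCommand({
        splitVector: 'db65.coll65',
        keyPattern: { tid: 1.0 },
        min: { tid: 7.0 },
        max: { tid: 12.0 },
        maxChunkSizeBytes: 13107200,  // ~12.5 MB, as logged
        maxSplitPoints: 0,
        maxChunkObjects: 250000
    });
    printjson(res);  // on success: { splitKeys: [ { tid: 8 }, { tid: 9 }, ... ], ok: 1 }
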
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.827-0400 m30999| 2015-07-09T14:16:10.812-0400 W SHARDING [conn424] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.836-0400 m30998| 2015-07-09T14:16:10.813-0400 W SHARDING [conn430] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.837-0400 m30998| 2015-07-09T14:16:10.821-0400 W SHARDING [conn428] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 15.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.837-0400 m31100| 2015-07-09T14:16:10.822-0400 W SHARDING [conn38] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
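
Once splitVector returns keys, the router sends the splitChunk command whose failures are logged above. The same split can be requested manually through a mongos; sh.splitAt() is a thin wrapper around the "split" admin command, and, as the warnings show, only one concurrent caller can win the collection lock:

    // Sketch: ask a mongos for the same split by hand.
    // Equivalent to: db.adminCommand({ split: 'db65.coll65', middle: { tid: 8.0 } })
    sh.splitAt('db65.coll65', { tid: 8.0 });
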
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.838-0400 m30999| 2015-07-09T14:16:10.825-0400 W SHARDING [conn428] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:10.973-0400 m31100| 2015-07-09T14:16:10.964-0400 I QUERY [conn184] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36b') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253327645594 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 134231 } } } 185ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.017-0400 m31100| 2015-07-09T14:16:11.015-0400 I QUERY [conn191] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f373') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254860338681 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 438847 } } } 245ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.047-0400 m31100| 2015-07-09T14:16:11.045-0400 I QUERY [conn58] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36f') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253555423534 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 19931 } } } 266ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.048-0400 m31100| 2015-07-09T14:16:11.045-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:4199259 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3629ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.102-0400 m31100| 2015-07-09T14:16:11.101-0400 I QUERY [conn175] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f375') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253921304690 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 244186 } } } 321ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.236-0400 m31100| 2015-07-09T14:16:11.234-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:3 reslen:6998894 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 1778ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.304-0400 m31100| 2015-07-09T14:16:11.302-0400 I WRITE [conn31] insert db65.coll65 query: { _id: ObjectId('559eba68eac5440bf8d3f37b'), tid: 17.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 10185351 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 3204ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.305-0400 m31100| 2015-07-09T14:16:11.303-0400 I COMMAND [conn31] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba68eac5440bf8d3f37b'), tid: 17.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 10185351 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3204ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.352-0400 m31100| 2015-07-09T14:16:11.351-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 1615ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.369-0400 m31100| 2015-07-09T14:16:11.368-0400 I SHARDING [conn35] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.390-0400 m31100| 2015-07-09T14:16:11.389-0400 I QUERY [conn179] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f370') }, 
$showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253953725840 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 30552 } } } 604ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.391-0400 m31100| 2015-07-09T14:16:11.389-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1080713 } } } 615ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.391-0400 m31100| 2015-07-09T14:16:11.390-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1075510 } } } 610ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.393-0400 m31100| 2015-07-09T14:16:11.392-0400 I COMMAND [conn35] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 280283 } } } protocol:op_command 303ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.394-0400 m31100| 2015-07-09T14:16:11.392-0400 I QUERY [conn183] query db65.coll65 query: { query: { tid: 8.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:282 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 470826 } } } 612ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.394-0400 m31100| 2015-07-09T14:16:11.392-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.398-0400 m31100| 2015-07-09T14:16:11.397-0400 W SHARDING [conn35] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
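
The findAndModify commands interleaved above are the workload's document-growth step: each thread selects its smallest document by length, pads a string field, and multiplies length, which is what keeps pushing these chunks over the split threshold. A hedged reconstruction of one such call; the padding string is truncated to "xxx..." in the log, so pad below is only illustrative:

    // Sketch of the growth update behind the logged findAndModify entries.
    // 'pad' stands in for the truncated run of 'x' characters.
    var pad = new Array(1024).join('x');
    db.getSiblingDB('db65').coll65.findAndModify({
        query:  { tid: 7.0 },
        sort:   { length: 1.0 },
        update: { $set: { findAndModify_update_grow: pad },
                  $mul: { length: 3.0 } },
        new:    true
    });
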
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.399-0400 m30998| 2015-07-09T14:16:11.398-0400 W SHARDING [conn429] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.410-0400 m31100| 2015-07-09T14:16:11.410-0400 I QUERY [conn186] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36c') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254395767765 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 1040650 } } } 629ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.501-0400 m31100| 2015-07-09T14:16:11.500-0400 I QUERY [conn181] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f379') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254827287192 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 231213 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.506-0400 m31100| 2015-07-09T14:16:11.503-0400 I QUERY [conn58] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f376') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253317777870 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 258289 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.537-0400 m31100| 2015-07-09T14:16:11.533-0400 I QUERY [conn45] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f366') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254509703086 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 86508 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.554-0400 m31100| 2015-07-09T14:16:11.552-0400 I QUERY [conn184] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f367') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253522135901 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 169669 } } } 
144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.646-0400 m31100| 2015-07-09T14:16:11.644-0400 I QUERY [conn175] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f374') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253055601392 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 206232 } } } 257ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.691-0400 m31100| 2015-07-09T14:16:11.690-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 227ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.724-0400 m31100| 2015-07-09T14:16:11.720-0400 I QUERY [conn178] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f371') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254760346586 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 226545 } } } 341ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.727-0400 m31100| 2015-07-09T14:16:11.726-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 329383 } } } 336ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.727-0400 m31100| 2015-07-09T14:16:11.726-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 329791 } } } 336ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.727-0400 m31100| 2015-07-09T14:16:11.726-0400 I QUERY [conn191] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f372') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254495973672 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 198873 } } } 340ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.757-0400 m31100| 2015-07-09T14:16:11.741-0400 I QUERY [conn177] query db65.coll65 query: { query: { tid: 17.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 
392917 } } } 353ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.762-0400 m31100| 2015-07-09T14:16:11.761-0400 I WRITE [conn24] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f37e'), tid: 5.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 279132 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 673ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.763-0400 m31100| 2015-07-09T14:16:11.761-0400 I COMMAND [conn24] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f37e'), tid: 5.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 279132 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 673ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.763-0400 m31100| 2015-07-09T14:16:11.761-0400 I WRITE [conn67] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f383'), tid: 15.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 158689 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 194ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.764-0400 m31100| 2015-07-09T14:16:11.761-0400 I COMMAND [conn67] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f383'), tid: 15.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 158689 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 194ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.764-0400 m31100| 2015-07-09T14:16:11.762-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:859 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 328184 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 367ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.765-0400 m31100| 2015-07-09T14:16:11.762-0400 I WRITE [conn70] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f37f'), tid: 18.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 229173 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 606ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.766-0400 m31100| 2015-07-09T14:16:11.762-0400 I COMMAND [conn70] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f37f'), tid: 18.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 229173 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 606ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.766-0400 m31100| 2015-07-09T14:16:11.763-0400 I WRITE [conn23] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f37c'), tid: 14.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 312238 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 709ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.767-0400 m31100| 2015-07-09T14:16:11.763-0400 I WRITE [conn29] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f385'), tid: 0.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 111842 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.767-0400 m31100| 2015-07-09T14:16:11.765-0400 I COMMAND [conn23] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f37c'), tid: 14.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, 
acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 312238 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 711ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.768-0400 m31100| 2015-07-09T14:16:11.765-0400 I COMMAND [conn29] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f385'), tid: 0.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 111842 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 151ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.771-0400 m31100| 2015-07-09T14:16:11.769-0400 I WRITE [conn22] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f384'), tid: 12.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 118725 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.771-0400 m31100| 2015-07-09T14:16:11.769-0400 I COMMAND [conn22] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f384'), tid: 12.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 118725 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.779-0400 m31100| 2015-07-09T14:16:11.775-0400 I WRITE [conn31] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f381'), tid: 3.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 259488 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 309ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.780-0400 m31100| 2015-07-09T14:16:11.776-0400 I WRITE [conn144] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f37d'), tid: 4.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 279234 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 687ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.780-0400 
m31100| 2015-07-09T14:16:11.776-0400 I COMMAND [conn31] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f381'), tid: 3.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 259488 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 310ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.790-0400 m31100| 2015-07-09T14:16:11.785-0400 I COMMAND [conn144] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f37d'), tid: 4.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 279234 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 697ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.790-0400 m31100| 2015-07-09T14:16:11.785-0400 I WRITE [conn68] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f380'), tid: 16.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 280542 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 339ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.791-0400 m31100| 2015-07-09T14:16:11.786-0400 I COMMAND [conn68] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f380'), tid: 16.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 280542 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 340ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.814-0400 m31100| 2015-07-09T14:16:11.813-0400 I WRITE [conn147] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f382'), tid: 19.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 172969 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 256ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.815-0400 m31100| 
2015-07-09T14:16:11.813-0400 I COMMAND [conn147] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f382'), tid: 19.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 172969 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 257ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.826-0400 m31100| 2015-07-09T14:16:11.825-0400 I QUERY [conn73] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f36e') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253302275210 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 328823 } } } 105ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.843-0400 m31100| 2015-07-09T14:16:11.833-0400 I WRITE [conn30] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f386'), tid: 6.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 67797 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 163ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.844-0400 m31100| 2015-07-09T14:16:11.834-0400 I COMMAND [conn30] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f386'), tid: 6.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 67797 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 164ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.875-0400 m31100| 2015-07-09T14:16:11.874-0400 I SHARDING [conn38] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.875-0400 m31100| 2015-07-09T14:16:11.874-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 147352 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.876-0400 m31100| 2015-07-09T14:16:11.874-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { 
acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 147473 } } } 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.884-0400 m31100| 2015-07-09T14:16:11.875-0400 I COMMAND [conn38] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:156 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 108601 } } } protocol:op_command 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.888-0400 m31100| 2015-07-09T14:16:11.885-0400 I WRITE [conn67] insert db65.coll65 query: { _id: ObjectId('559eba6beac5440bf8d3f387'), tid: 11.0, length: 1, findAndModify_update_grow: "x" } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 100442 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } 110ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.890-0400 m31100| 2015-07-09T14:16:11.885-0400 I COMMAND [conn67] command db65.$cmd command: insert { insert: "coll65", documents: [ { _id: ObjectId('559eba6beac5440bf8d3f387'), tid: 11.0, length: 1, findAndModify_update_grow: "x" } ], ordered: true, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|5, ObjectId('559eba62ca4787b9985d1ea0') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 100442 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 111ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.890-0400 m31100| 2015-07-09T14:16:11.888-0400 I QUERY [conn181] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f377') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254275115541 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 174861 } } } 158ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:11.918-0400 m31100| 2015-07-09T14:16:11.916-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.009-0400 m31100| 2015-07-09T14:16:11.993-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 116604 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.009-0400 
m31100| 2015-07-09T14:16:11.995-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 116550 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.010-0400 m31100| 2015-07-09T14:16:12.003-0400 I QUERY [conn188] query db65.coll65 query: { query: { tid: 6.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 133562 } } } 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.010-0400 m31100| 2015-07-09T14:16:12.004-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.011-0400 m31100| 2015-07-09T14:16:12.004-0400 I QUERY [conn186] query db65.coll65 query: { query: { tid: 5.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 213438 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.029-0400 m31100| 2015-07-09T14:16:12.023-0400 I QUERY [conn45] query db65.coll65 query: { query: { _id: ObjectId('559eba67eac5440bf8d3f37a') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 211154 } } } 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.046-0400 m31100| 2015-07-09T14:16:12.044-0400 W SHARDING [conn38] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
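
The recurring getmore entries against local.oplog.rs above (stable cursorid, reslen around 4 MB) are the secondaries tailing the primary's oplog to replicate the large documents this workload writes; each grown document arrives as one oversized oplog batch. A sketch of an equivalent tailable cursor in the shell, assuming Timestamp(seconds, increment) corresponds to the "Timestamp 1436464517000|1" printed in the logged predicate:

    // Sketch: tail the primary's oplog the way the secondaries do above.
    var oplog = db.getSiblingDB('local').oplog.rs;
    var cur = oplog.find({ ts: { $gte: Timestamp(1436464517, 1) } })
                   .addOption(DBQuery.Option.tailable | DBQuery.Option.awaitData);
    while (cur.hasNext()) {
        printjson(cur.next());  // one oplog entry per replicated write
    }
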
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.047-0400 m30999| 2015-07-09T14:16:12.045-0400 W SHARDING [conn431] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.055-0400 m31100| 2015-07-09T14:16:12.053-0400 I QUERY [conn191] query db65.coll65 query: { query: { tid: 4.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 210804 } } } 154ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.080-0400 m31100| 2015-07-09T14:16:12.076-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 59388 } } } 82ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.082-0400 m31100| 2015-07-09T14:16:12.081-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 52387 } } } 85ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.085-0400 m31100| 2015-07-09T14:16:12.083-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:379 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 243748 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 313ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.089-0400 m31100| 2015-07-09T14:16:12.088-0400 I QUERY [conn184] query db65.coll65 query: { query: { tid: 12.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 238268 } } } 213ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.090-0400 m31100| 2015-07-09T14:16:12.088-0400 I QUERY [conn179] query db65.coll65 query: { query: { tid: 16.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 203672 } } } 201ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.090-0400 m31100| 2015-07-09T14:16:12.089-0400 I QUERY [conn58] query db65.coll65 query: { query: { tid: 15.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 250231 } } } 203ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.091-0400 m31100| 2015-07-09T14:16:12.090-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1673 } } } 14ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.096-0400 m31100| 2015-07-09T14:16:12.090-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:379 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 118035 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 193ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.097-0400 m31100| 2015-07-09T14:16:12.090-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:379 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 127665 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 190ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.107-0400 m31100| 2015-07-09T14:16:12.105-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:379 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 126574 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 185ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.145-0400 m31100| 2015-07-09T14:16:12.133-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 14336 } } } 51ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.145-0400 m31100| 2015-07-09T14:16:12.136-0400 I QUERY [conn74] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 14467 } } } 45ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.197-0400 m31100| 2015-07-09T14:16:12.196-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:4199259 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 7414251 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 3904ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.214-0400 m31100| 2015-07-09T14:16:12.213-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 14.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 102291 } } } 326ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.231-0400 m31100| 2015-07-09T14:16:12.230-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:12.230-0400-559eba6c792e00bb67274a91", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465772230), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: 7.0 }, max: { tid: 12.0 } }, number: 1, of: 4, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.307-0400 m31100| 2015-07-09T14:16:12.303-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:12.303-0400-559eba6c792e00bb67274a92", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465772303), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: 7.0 }, max: { tid: 12.0 } }, number: 2, of: 4, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.311-0400 m31100| 2015-07-09T14:16:12.309-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:859 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 14329 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.332-0400 m31100| 2015-07-09T14:16:12.331-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:379 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 14019 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.352-0400 m31100| 2015-07-09T14:16:12.350-0400 I QUERY [conn179] query db65.coll65 query: { query: { tid: 12.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:282 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 15254 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.353-0400 m31100| 2015-07-09T14:16:12.350-0400 I QUERY [conn181] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:282 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 12374 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.354-0400 m31100| 2015-07-09T14:16:12.351-0400 I QUERY [conn58] query db65.coll65 query: { query: { tid: 15.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:282 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 14557 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.355-0400 m31100| 2015-07-09T14:16:12.352-0400 I QUERY [conn184] query db65.coll65 query: { query: { tid: 18.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 
1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:282 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 14323 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.356-0400 m31100| 2015-07-09T14:16:12.352-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:859 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.357-0400 m31100| 2015-07-09T14:16:12.352-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 13939 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.357-0400 m31100| 2015-07-09T14:16:12.354-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 8.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 14719 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.357-0400 m31100| 2015-07-09T14:16:12.356-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 16.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:282 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 121ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.358-0400 m31100| 2015-07-09T14:16:12.358-0400 I SHARDING [conn35] request split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.359-0400 m31100| 2015-07-09T14:16:12.358-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 14529 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.360-0400 m31100| 2015-07-09T14:16:12.360-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:859 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 12845 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.362-0400 m31100| 2015-07-09T14:16:12.361-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:12.361-0400-559eba6c792e00bb67274a93", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465772361), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: 7.0 }, max: { tid: 12.0 } }, number: 3, of: 4, chunk: { min: { tid: 9.0 }, max: { tid: 11.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.362-0400 m31100| 2015-07-09T14:16:12.362-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.364-0400 m31100| 2015-07-09T14:16:12.363-0400 W SHARDING [conn35] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.366-0400 m30998| 2015-07-09T14:16:12.364-0400 W SHARDING [conn425] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.382-0400 m31100| 2015-07-09T14:16:12.379-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.419-0400 m31100| 2015-07-09T14:16:12.415-0400 I SHARDING [conn15] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:12.415-0400-559eba6c792e00bb67274a94", server: "bs-osx108-8", clientAddr: "127.0.0.1:62566", time: new Date(1436465772415), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: 7.0 }, max: { tid: 12.0 } }, number: 4, of: 4, chunk: { min: { tid: 11.0 }, max: { tid: 12.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.434-0400 m31100| 2015-07-09T14:16:12.433-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 197ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.446-0400 m31100| 2015-07-09T14:16:12.444-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 137ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.460-0400 m31100| 2015-07-09T14:16:12.460-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.470-0400 m31100| 2015-07-09T14:16:12.469-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:5979 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.475-0400 m31100| 2015-07-09T14:16:12.472-0400 I QUERY [conn50] query db65.coll65 query: { query: { tid: 5.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.489-0400 m31100| 2015-07-09T14:16:12.488-0400 I SHARDING [conn15] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' unlocked. 
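The repeated COMMAND entries above come from the workload's document-growth state: each worker thread findAndModify-s its own document (matched by tid), $set-ting a long filler string and $mul-tiplying its length field so the documents, and eventually the chunk, grow until the autosplitter fires. The earlier splitChunk failure with code 125 ("Lock for splitting chunk ... is taken") is the expected race between two concurrent split attempts; the loser backs off while the winner proceeds under the distributed lock just released above. A minimal shell reconstruction of the logged command follows; the namespace, query, sort, and update shape are taken straight from the log, while the database handle, thread id, filler-string length, and multiplier are illustrative assumptions, not the workload's verbatim source:

    // Hedged sketch of the findAndModify shape that produces the entries above.
    var res = db.getSiblingDB('db65').runCommand({
        findAndModify: 'coll65',
        query: { tid: 19.0 },           // each FSM thread targets its own tid
        sort: { length: 1.0 },          // operate on the smallest matching doc
        update: {
            $set: { findAndModify_update_grow: new Array(151).join('x') },
            $mul: { length: 3.0 }       // grow 'length' so later iterations write bigger docs
        },
        new: true                       // return the post-image of the document
    });
    assert.commandWorked(res);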
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.490-0400 m31100| 2015-07-09T14:16:12.488-0400 I COMMAND [conn15] command db65.coll65 command: splitChunk { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 13157721 } } } protocol:op_command 4421ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.490-0400 m30999| 2015-07-09T14:16:12.489-0400 I SHARDING [conn427] ChunkManager: time to load chunks for db65.coll65: 0ms sequenceNumber: 287 version: 1|9||559eba62ca4787b9985d1ea0 based on: 1|5||559eba62ca4787b9985d1ea0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.506-0400 m30999| 2015-07-09T14:16:12.504-0400 I SHARDING [conn427] autosplitted db65.coll65 shard: ns: db65.coll65, shard: test-rs0, lastmod: 1|3||000000000000000000000000, min: { tid: 7.0 }, max: { tid: 12.0 } into 4 (splitThreshold 13107200) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.529-0400 m31100| 2015-07-09T14:16:12.516-0400 I QUERY [conn73] query db65.coll65 query: { query: { tid: 9.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 139ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.529-0400 m31100| 2015-07-09T14:16:12.521-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:2 reslen:5599056 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, oplog: { acquireCount: { r: 2 } } } 475ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.533-0400 m31100| 2015-07-09T14:16:12.523-0400 I QUERY [conn177] query db65.coll65 query: { query: { tid: 3.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.540-0400 m31100| 2015-07-09T14:16:12.523-0400 I COMMAND [conn181] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.563-0400 m31100| 2015-07-09T14:16:12.529-0400 I QUERY [conn179] query db65.coll65 query: { query: { tid: 0.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.565-0400 m31100| 2015-07-09T14:16:12.558-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 14.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:282 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.567-0400 m31100| 2015-07-09T14:16:12.563-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 12.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 204ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.571-0400 m31100| 2015-07-09T14:16:12.564-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:859 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 205ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.576-0400 m31100| 2015-07-09T14:16:12.564-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:2139 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.605-0400 m31100| 2015-07-09T14:16:12.567-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:2139 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.606-0400 m31100| 2015-07-09T14:16:12.567-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:2139 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.607-0400 m31100| 2015-07-09T14:16:12.574-0400 I QUERY [conn184] query db65.coll65 query: { query: { tid: 4.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.607-0400 m31100| 2015-07-09T14:16:12.599-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 230ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.710-0400 m31100| 2015-07-09T14:16:12.702-0400 I QUERY [conn179] query db65.coll65 query: { query: { tid: 0.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.735-0400 m31100| 2015-07-09T14:16:12.728-0400 I QUERY [conn49] query db65.coll65 query: { query: { tid: 5.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.736-0400 m31100| 2015-07-09T14:16:12.731-0400 I QUERY [conn47] query db65.coll65 query: { query: { tid: 6.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 118ms 
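At this point the splitChunk on conn15 has completed after waiting roughly 13.1s for the collection lock (timeAcquiringMicros W: 13157721) held by the concurrent findAndModify traffic; the mongos then reloads the chunk map (version 1|9 based on 1|5) and records the autosplit of { tid: 7.0 } -->> { tid: 12.0 } into 4 chunks once the 13107200-byte splitThreshold was crossed. The resulting layout can be inspected, or the same split issued by hand, from a mongos shell; a hedged sketch assuming a connection to one of the test's mongos processes (config.chunks keyed by ns is the 3.x schema this log comes from):

    // List the chunks the autosplitter just created for db65.coll65.
    db.getSiblingDB('config').chunks
        .find({ ns: 'db65.coll65' })
        .sort({ min: 1 })
        .forEach(printjson);

    // Equivalent manual split at one of the splitKeys seen in the log:
    sh.splitAt('db65.coll65', { tid: 8.0 });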
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.736-0400 m31100| 2015-07-09T14:16:12.734-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 18.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.757-0400 m31100| 2015-07-09T14:16:12.735-0400 I QUERY [conn182] query db65.coll65 query: { query: { tid: 2.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 133ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.757-0400 m31100| 2015-07-09T14:16:12.736-0400 I QUERY [conn73] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.758-0400 m31100| 2015-07-09T14:16:12.757-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:2139 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.768-0400 m31100| 2015-07-09T14:16:12.767-0400 I QUERY [conn50] query db65.coll65 query: { query: { tid: 7.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 199ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.798-0400 m31100| 2015-07-09T14:16:12.796-0400 I QUERY [conn181] query db65.coll65 query: { query: { tid: 15.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.800-0400 m31100| 2015-07-09T14:16:12.799-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 232ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.809-0400 m31100| 2015-07-09T14:16:12.803-0400 I QUERY [conn186] query db65.coll65 query: { query: { tid: 1.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 347ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.821-0400 m31100| 2015-07-09T14:16:12.820-0400 I QUERY [conn191] query db65.coll65 query: { query: { tid: 16.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 203ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.825-0400 m31100| 2015-07-09T14:16:12.822-0400 I QUERY [conn58] query db65.coll65 query: { query: { tid: 3.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 205ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.826-0400 m31100| 2015-07-09T14:16:12.825-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 12.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 247ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.828-0400 m31100| 2015-07-09T14:16:12.826-0400 I QUERY [conn180] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f378') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253305107626 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 208ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.830-0400 m31100| 2015-07-09T14:16:12.829-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 9.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 227ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.834-0400 m31100| 2015-07-09T14:16:12.831-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.835-0400 m31100| 2015-07-09T14:16:12.831-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.838-0400 m31100| 2015-07-09T14:16:12.836-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:5979 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.866-0400 m31100| 2015-07-09T14:16:12.864-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:5979 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.918-0400 m31100| 2015-07-09T14:16:12.911-0400 I QUERY [conn176] query db65.coll65 query: { query: { _id: ObjectId('559eba63eac5440bf8d3f369') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254192199844 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199162 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 479ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.940-0400 m31100| 2015-07-09T14:16:12.939-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 269ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.967-0400 m31100| 2015-07-09T14:16:12.962-0400 I QUERY [conn181] query db65.coll65 query: { query: { tid: 19.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.982-0400 m31100| 2015-07-09T14:16:12.962-0400 I QUERY [conn178] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.982-0400 m31100| 2015-07-09T14:16:12.981-0400 I QUERY [conn177] query db65.coll65 query: { query: { tid: 
17.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.996-0400 m31100| 2015-07-09T14:16:12.985-0400 I QUERY [conn184] query db65.coll65 query: { query: { tid: 0.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.997-0400 m31100| 2015-07-09T14:16:12.994-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 395ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:12.999-0400 m31100| 2015-07-09T14:16:12.998-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.001-0400 m31100| 2015-07-09T14:16:12.999-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 252ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.001-0400 m31100| 2015-07-09T14:16:12.999-0400 I QUERY [conn49] query db65.coll65 query: { query: { tid: 5.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.002-0400 m31100| 2015-07-09T14:16:13.002-0400 I QUERY [conn73] query db65.coll65 query: { query: { tid: 9.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 153ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.026-0400 m31100| 2015-07-09T14:16:13.025-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:2139 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 199ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.042-0400 m31100| 2015-07-09T14:16:13.041-0400 I QUERY [conn186] query db65.coll65 query: { query: { tid: 1.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.043-0400 m31100| 2015-07-09T14:16:13.042-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:5979 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 214ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.045-0400 m31100| 2015-07-09T14:16:13.044-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 234ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.045-0400 m31100| 2015-07-09T14:16:13.045-0400 I QUERY [conn50] query db65.coll65 query: { query: { tid: 7.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.053-0400 m31100| 2015-07-09T14:16:13.052-0400 I QUERY [conn188] query db65.coll65 query: { query: { tid: 6.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.065-0400 m31100| 2015-07-09T14:16:13.064-0400 I QUERY [conn179] query db65.coll65 query: { query: { tid: 2.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 226ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.066-0400 m31100| 2015-07-09T14:16:13.066-0400 I QUERY [conn183] query db65.coll65 query: { query: { tid: 14.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 
scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 228ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.072-0400 m31100| 2015-07-09T14:16:13.071-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 12.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.084-0400 m31100| 2015-07-09T14:16:13.083-0400 I QUERY [conn180] query db65.coll65 query: { query: { tid: 10.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:119 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.176-0400 m31100| 2015-07-09T14:16:13.175-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.186-0400 m31100| 2015-07-09T14:16:13.180-0400 I QUERY [conn178] query db65.coll65 query: { query: { tid: 15.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.203-0400 m31100| 2015-07-09T14:16:13.189-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 105ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.204-0400 m31100| 2015-07-09T14:16:13.204-0400 I QUERY [conn191] query db65.coll65 query: { query: { tid: 8.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.212-0400 m31100| 2015-07-09T14:16:13.210-0400 I COMMAND [conn73] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 190ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.216-0400 m31100| 2015-07-09T14:16:13.215-0400 I COMMAND [conn180] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.231-0400 m31100| 2015-07-09T14:16:13.221-0400 I QUERY [conn10] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:21764899830 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:4199366 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 177ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.239-0400 m31100| 2015-07-09T14:16:13.225-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 10.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:379 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.240-0400 m31100| 2015-07-09T14:16:13.230-0400 I QUERY [conn58] query db65.coll65 query: { query: { tid: 9.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:17402 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.240-0400 m31100| 2015-07-09T14:16:13.231-0400 I QUERY [conn49] query db65.coll65 query: { query: { tid: 1.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:5882 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.241-0400 m31100| 2015-07-09T14:16:13.233-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 150ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.245-0400 m31100| 2015-07-09T14:16:13.244-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.265-0400 m31100| 2015-07-09T14:16:13.264-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 191ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.266-0400 m31100| 2015-07-09T14:16:13.265-0400 I COMMAND [conn175] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.269-0400 m31100| 2015-07-09T14:16:13.269-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 160.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:379 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.297-0400 m31100| 2015-07-09T14:16:13.292-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:17499 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.365-0400 m31100| 2015-07-09T14:16:13.364-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.378-0400 m31100| 2015-07-09T14:16:13.376-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:52059 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 163ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.387-0400 m31100| 2015-07-09T14:16:13.386-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 4.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:17402 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.393-0400 m31100| 2015-07-09T14:16:13.392-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 6.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:17402 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.407-0400 m31100| 2015-07-09T14:16:13.405-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:17499 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.408-0400 m31100| 2015-07-09T14:16:13.405-0400 I COMMAND [conn181] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:17499 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 158ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.412-0400 m31100| 2015-07-09T14:16:13.408-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 9.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:52059 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 164ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.450-0400 m31100| 2015-07-09T14:16:13.449-0400 I QUERY [conn178] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:17402 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.468-0400 m31100| 2015-07-09T14:16:13.467-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:52059 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 107ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.486-0400 m31100| 2015-07-09T14:16:13.486-0400 I QUERY [conn183] query db65.coll65 query: { query: { tid: 18.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 135ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.508-0400 m31100| 2015-07-09T14:16:13.507-0400 I COMMAND [conn73] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 110ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.509-0400 m31100| 2015-07-09T14:16:13.507-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:52059 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.516-0400 m31100| 2015-07-09T14:16:13.516-0400 I QUERY [conn180] query db65.coll65 query: { query: { tid: 14.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:17402 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.520-0400 m31100| 2015-07-09T14:16:13.518-0400 I QUERY [conn50] query db65.coll65 query: { query: { tid: 1.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:17402 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 114ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.521-0400 m31100| 2015-07-09T14:16:13.520-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 10.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 4.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:859 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 161ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.530-0400 m31100| 2015-07-09T14:16:13.527-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 0.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.536-0400 m31100| 2015-07-09T14:16:13.533-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:52059 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.549-0400 m31100| 2015-07-09T14:16:13.548-0400 I QUERY [conn181] query db65.coll65 query: { query: { tid: 7.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:17402 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.582-0400 m31100| 2015-07-09T14:16:13.581-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:52059 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 215ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.591-0400 m31100| 2015-07-09T14:16:13.590-0400 I QUERY [conn188] query db65.coll65 query: { query: { tid: 2.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.594-0400 m31100| 2015-07-09T14:16:13.594-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:52059 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 187ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.596-0400 m31100| 2015-07-09T14:16:13.596-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 12.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:52059 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 286ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.599-0400 m31100| 2015-07-09T14:16:13.598-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 247ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.630-0400 m31100| 2015-07-09T14:16:13.629-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:52059 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.651-0400 m31100| 2015-07-09T14:16:13.644-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 10.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:762 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.673-0400 m31100| 2015-07-09T14:16:13.663-0400 I QUERY [conn178] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.708-0400 m31100| 2015-07-09T14:16:13.704-0400 I QUERY [conn58] query db65.coll65 query: { query: { tid: 15.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 160ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.709-0400 m31100| 2015-07-09T14:16:13.707-0400 I COMMAND [conn180] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:52059 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.722-0400 m31100| 2015-07-09T14:16:13.721-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 469ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.731-0400 m31100| 2015-07-09T14:16:13.731-0400 I QUERY [conn183] query db65.coll65 query: { query: { tid: 6.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.754-0400 m31100| 2015-07-09T14:16:13.753-0400 I QUERY [conn181] query db65.coll65 query: { query: { tid: 7.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.755-0400 m31100| 2015-07-09T14:16:13.754-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:2139 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 132ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.769-0400 m31100| 2015-07-09T14:16:13.768-0400 I QUERY [conn176] query db65.coll65 query: { query: { tid: 1.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.810-0400 m31100| 2015-07-09T14:16:13.809-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 434ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.810-0400 m31100| 2015-07-09T14:16:13.809-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 10.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:2042 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.847-0400 m31100| 2015-07-09T14:16:13.846-0400 I QUERY [conn182] query db65.coll65 query: { query: { _id: ObjectId('559eba67eac5440bf8d3f37a') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.880-0400 m31100| 2015-07-09T14:16:13.878-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 12.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.886-0400 m31100| 2015-07-09T14:16:13.885-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 359ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.922-0400 m31100| 2015-07-09T14:16:13.919-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:4199218 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 215ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.929-0400 m31100| 2015-07-09T14:16:13.927-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.956-0400 m31100| 2015-07-09T14:16:13.955-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 273ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.962-0400 m31100| 2015-07-09T14:16:13.961-0400 I COMMAND [conn73] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 362ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.982-0400 m31100| 2015-07-09T14:16:13.978-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 10.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:5979 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 166ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:13.999-0400 m31100| 2015-07-09T14:16:13.991-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 269ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.000-0400 m31100| 2015-07-09T14:16:13.992-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 474ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.000-0400 m31100| 2015-07-09T14:16:13.994-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 283ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.019-0400 m31100| 2015-07-09T14:16:14.015-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 215ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.022-0400 m31100| 2015-07-09T14:16:14.020-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 241ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.030-0400 m31100| 2015-07-09T14:16:14.029-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 305ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.054-0400 m31100| 2015-07-09T14:16:14.048-0400 I COMMAND [conn181] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 248ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.059-0400 m31100| 2015-07-09T14:16:14.056-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:155739 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 317ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.060-0400 m31100| 2015-07-09T14:16:14.057-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 414ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.061-0400 m31100| 2015-07-09T14:16:14.059-0400 I COMMAND [conn180] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 228ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.100-0400 m31100| 2015-07-09T14:16:14.098-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:2 reslen:4199536 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, oplog: { acquireCount: { r: 1 } } } 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.125-0400 m31100| 2015-07-09T14:16:14.124-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 9.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 566ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.205-0400 m31100| 2015-07-09T14:16:14.204-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:466779 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 2 } }, Collection: { acquireCount: { w: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 247ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.212-0400 m31100| 2015-07-09T14:16:14.210-0400 I COMMAND [conn175] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 652ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.232-0400 m31100| 2015-07-09T14:16:14.231-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 260ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.299-0400 m31100| 2015-07-09T14:16:14.298-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 4.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 185ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.336-0400 m31100| 2015-07-09T14:16:14.335-0400 I QUERY [conn175] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f385') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 108ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.487-0400 m31100| 2015-07-09T14:16:14.485-0400 I QUERY [conn185] query db65.coll65 query: { query: { tid: 13.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:51962 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.506-0400 m31100| 2015-07-09T14:16:14.502-0400 I QUERY [conn58] query db65.coll65 query: { query: { tid: 9.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 174ms 
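Every slow findAndModify in the stretch above has the same shape: each workload thread queries its own tid, sorts on { length: 1.0 } to pick its smallest matching document, $sets a long padding string, and $muls the length field by 3.0 with new: true. The reslen values stepping from 155739 to 466779 (and later 1399899) are consistent with the matched document roughly tripling on each pass. A minimal mongo-shell sketch of that command shape follows; the padding length and the tid value are illustrative, since the actual string is truncated ("xxxx...") in the log:

    // Sketch of the logged findAndModify shape (illustrative values only).
    var pad = new Array(151).join('x');          // stand-in for the truncated padding string
    var doc = db.coll65.findAndModify({
        query: { tid: 6.0 },                     // each FSM thread targets its own tid
        sort: { length: 1.0 },                   // grow the smallest matching document first
        update: {
            $set: { findAndModify_update_grow: pad },
            $mul: { length: 3.0 }                // length field triples on every pass
        },
        new: true                                // return the post-update document
    });

The paired QUERY entries that follow each command ($showDiskLoc reads by tid with orderby: { length: 1.0 }, plus the _id point lookups) are the workload re-reading the documents it just grew, and the conn9 getmore on local.oplog.rs (reslen 4199536, then 1400174) is a secondary tailing the oplog to drain those same large update entries.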
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.512-0400 m31100| 2015-07-09T14:16:14.510-0400 I QUERY [conn182] query db65.coll65 query: { query: { tid: 0.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 166ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.522-0400 m31100| 2015-07-09T14:16:14.518-0400 I QUERY [conn45] query db65.coll65 query: { query: { _id: ObjectId('559eba67eac5440bf8d3f37a') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.547-0400 m31100| 2015-07-09T14:16:14.546-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 12.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 426ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.574-0400 m31100| 2015-07-09T14:16:14.572-0400 I QUERY [conn181] query db65.coll65 query: { query: { tid: 17.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.600-0400 m31100| 2015-07-09T14:16:14.593-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 405ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.617-0400 m31100| 2015-07-09T14:16:14.615-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 10.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:52059 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 459ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.670-0400 m31100| 2015-07-09T14:16:14.668-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 8.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 136ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.694-0400 m31100| 2015-07-09T14:16:14.693-0400 I QUERY [conn183] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f384') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.697-0400 m31100| 2015-07-09T14:16:14.696-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 509ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.714-0400 m31100| 2015-07-09T14:16:14.713-0400 I QUERY [conn179] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f37f') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.787-0400 m31100| 2015-07-09T14:16:14.786-0400 I COMMAND [conn175] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 445ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.814-0400 m31100| 2015-07-09T14:16:14.811-0400 I QUERY [conn45] query db65.coll65 query: { query: { tid: 12.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.832-0400 m31100| 2015-07-09T14:16:14.831-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 644ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.877-0400 m31100| 2015-07-09T14:16:14.872-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 453ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.914-0400 m31100| 2015-07-09T14:16:14.903-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 428ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.914-0400 m31100| 2015-07-09T14:16:14.909-0400 I COMMAND [conn73] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 425ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.949-0400 m31100| 2015-07-09T14:16:14.945-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 502ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.955-0400 m31100| 2015-07-09T14:16:14.953-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 452ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.988-0400 m31100| 2015-07-09T14:16:14.987-0400 I QUERY [conn50] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f389') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:14.991-0400 m31100| 2015-07-09T14:16:14.990-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 541ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.018-0400 m31100| 2015-07-09T14:16:15.016-0400 I QUERY [conn175] query db65.coll65 query: { query: { tid: 14.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 123ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.036-0400 m31100| 2015-07-09T14:16:15.035-0400 I QUERY [conn73] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f383') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.039-0400 m31100| 2015-07-09T14:16:15.038-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:155739 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 530ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.040-0400 m31100| 2015-07-09T14:16:15.039-0400 I QUERY [conn178] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f381') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 122ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.043-0400 m31100| 2015-07-09T14:16:15.041-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 604ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.071-0400 m31100| 2015-07-09T14:16:15.068-0400 I QUERY [conn191] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f386') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.073-0400 m31100| 2015-07-09T14:16:15.068-0400 I COMMAND [conn180] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 566ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.087-0400 m31100| 2015-07-09T14:16:15.084-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 591ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.093-0400 m31100| 2015-07-09T14:16:15.091-0400 I QUERY [conn49] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f37e') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.097-0400 m31100| 2015-07-09T14:16:15.097-0400 I QUERY [conn9] getmore local.oplog.rs query: { ts: { $gte: Timestamp 1436464517000|1 } } cursorid:22174781017 ntoreturn:0 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:3 reslen:1400174 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, oplog: { acquireCount: { r: 3 } } } 168ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.132-0400 m31100| 2015-07-09T14:16:15.130-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 537ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.132-0400 m31100| 2015-07-09T14:16:15.132-0400 I QUERY [conn186] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f387') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 116ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.147-0400 m31100| 2015-07-09T14:16:15.145-0400 I COMMAND [conn181] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 9.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:466779 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 522ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.163-0400 m31100| 2015-07-09T14:16:15.163-0400 I QUERY [conn50] query db65.coll65 query: { query: { tid: 1.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 171ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.169-0400 m31100| 2015-07-09T14:16:15.169-0400 I QUERY [conn185] query db65.coll65 query: { query: { _id: ObjectId('559eba6ceac5440bf8d3f38d') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.176-0400 m31100| 2015-07-09T14:16:15.176-0400 I QUERY [conn176] query db65.coll65 query: { query: { tid: 19.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 237ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.192-0400 m31100| 2015-07-09T14:16:15.192-0400 I QUERY [conn73] query db65.coll65 query: { query: { tid: 3.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 147ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.199-0400 m31100| 2015-07-09T14:16:15.194-0400 I QUERY [conn178] query db65.coll65 query: { query: { tid: 15.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.200-0400 m31100| 2015-07-09T14:16:15.200-0400 I QUERY [conn191] query db65.coll65 query: { query: { tid: 6.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: 
{ r: 3 } }, Collection: { acquireCount: { r: 3 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.218-0400 m31100| 2015-07-09T14:16:15.204-0400 I QUERY [conn177] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f38b') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.229-0400 m31100| 2015-07-09T14:16:15.224-0400 I QUERY [conn180] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f380') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.235-0400 m31100| 2015-07-09T14:16:15.231-0400 I QUERY [conn184] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f37d') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.254-0400 m31100| 2015-07-09T14:16:15.253-0400 I COMMAND [conn47] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 10.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:155739 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 413ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.276-0400 m31100| 2015-07-09T14:16:15.271-0400 I QUERY [conn182] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f385') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 118ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.277-0400 m31100| 2015-07-09T14:16:15.271-0400 I QUERY [conn181] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f38a') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.310-0400 m31100| 2015-07-09T14:16:15.309-0400 I QUERY [conn186] query db65.coll65 query: { query: { tid: 11.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.315-0400 m31100| 2015-07-09T14:16:15.309-0400 I QUERY [conn185] query db65.coll65 query: { query: { tid: 13.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 117ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.315-0400 m31100| 2015-07-09T14:16:15.312-0400 I COMMAND [conn183] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 8.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 610ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.333-0400 m31100| 2015-07-09T14:16:15.332-0400 I COMMAND [conn58] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 17.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 650ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.336-0400 m31100| 2015-07-09T14:16:15.334-0400 I QUERY [conn49] query db65.coll65 query: { query: { tid: 5.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 240ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.356-0400 m31100| 2015-07-09T14:16:15.355-0400 I QUERY [conn177] query db65.coll65 query: { query: { tid: 7.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 148ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.367-0400 m31100| 2015-07-09T14:16:15.359-0400 I QUERY [conn47] query db65.coll65 query: { query: { _id: ObjectId('559eba6ceac5440bf8d3f38c') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.367-0400 m31100| 2015-07-09T14:16:15.361-0400 I SHARDING [conn35] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.404-0400 m31100| 2015-07-09T14:16:15.403-0400 I QUERY [conn184] query db65.coll65 query: { query: { tid: 4.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 
writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 156ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.430-0400 m31100| 2015-07-09T14:16:15.429-0400 I QUERY [conn182] query db65.coll65 query: { query: { tid: 0.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 152ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.434-0400 m31100| 2015-07-09T14:16:15.434-0400 I QUERY [conn181] query db65.coll65 query: { query: { tid: 9.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:1 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.435-0400 m31100| 2015-07-09T14:16:15.435-0400 I QUERY [conn180] query db65.coll65 query: { query: { tid: 16.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 } } } 190ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.495-0400 m31100| 2015-07-09T14:16:15.490-0400 I QUERY [conn35] query db65.coll65 query: { query: {}, orderby: { tid: -1.0 } } planSummary: IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:1 nscannedObjects:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:466663 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 128ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.507-0400 m31100| 2015-07-09T14:16:15.503-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.509-0400 m31100| 2015-07-09T14:16:15.508-0400 I SHARDING [conn35] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba6f792e00bb67274a95 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.509-0400 m31100| 2015-07-09T14:16:15.508-0400 I SHARDING [conn35] remotely refreshing metadata for db65.coll65 based on current shard version 1|9||559eba62ca4787b9985d1ea0, current metadata version is 1|9||559eba62ca4787b9985d1ea0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.542-0400 m31100| 2015-07-09T14:16:15.541-0400 I QUERY [conn47] query db65.coll65 query: { query: { _id: ObjectId('559eba67eac5440bf8d3f37a') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5253592923642 ntoreturn:0 ntoskip:0 
nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 } } } 160ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.563-0400 m30999| 2015-07-09T14:16:15.563-0400 I NETWORK [conn423] end connection 127.0.0.1:63962 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.594-0400 m31100| 2015-07-09T14:16:15.591-0400 I QUERY [conn43] killcursors db65.coll65 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 27936 } } } 28ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.615-0400 m31100| 2015-07-09T14:16:15.614-0400 I COMMAND [conn45] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 12.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 746ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.657-0400 m31100| 2015-07-09T14:16:15.655-0400 I COMMAND [conn188] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 2.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:1399899 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 668ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.664-0400 m31100| 2015-07-09T14:16:15.662-0400 I COMMAND [conn179] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 18.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 805ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.745-0400 m31100| 2015-07-09T14:16:15.742-0400 I COMMAND [conn178] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 1.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 522ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.787-0400 m31100| 2015-07-09T14:16:15.785-0400 I COMMAND [conn175] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 14.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:1399899 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 689ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.825-0400 m31100| 2015-07-09T14:16:15.824-0400 I COMMAND [conn191] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 6.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:1399899 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 563ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.876-0400 m31100| 2015-07-09T14:16:15.873-0400 I COMMAND [conn50] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 15.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 600ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.896-0400 m31100| 2015-07-09T14:16:15.895-0400 I COMMAND [conn177] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 11.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 528ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:15.934-0400 m31100| 2015-07-09T14:16:15.933-0400 I COMMAND [conn176] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 3.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 685ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.039-0400 m31100| 2015-07-09T14:16:16.038-0400 I COMMAND [conn182] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 4.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 16845 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 600ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.106-0400 m31100| 2015-07-09T14:16:16.105-0400 I COMMAND [conn73] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 19.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:1399899 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 54727 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 864ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.122-0400 m31100| 2015-07-09T14:16:16.121-0400 I COMMAND [conn186] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 5.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 21520 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 676ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.149-0400 m31100| 2015-07-09T14:16:16.148-0400 I COMMAND [conn185] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 13.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:466779 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 4 } }, Collection: { acquireCount: { w: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 2213 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 718ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.369-0400 m31100| 2015-07-09T14:16:16.363-0400 I SHARDING [conn40] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.370-0400 m31100| 2015-07-09T14:16:16.363-0400 I SHARDING [conn38] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.370-0400 m31100| 2015-07-09T14:16:16.363-0400 I SHARDING [conn35] metadata of collection db65.coll65 already up to date (shard version : 1|9||559eba62ca4787b9985d1ea0, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.370-0400 m31100| 2015-07-09T14:16:16.363-0400 I SHARDING [conn36] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.371-0400 m31100| 2015-07-09T14:16:16.363-0400 I SHARDING [conn15] request split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.371-0400 m31100| 2015-07-09T14:16:16.363-0400 I SHARDING [conn32] request split points lookup for chunk db65.coll65 { : 17.0 } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.372-0400 m31100| 2015-07-09T14:16:16.364-0400 I COMMAND [conn40] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:156 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1195370 } } } protocol:op_command 694ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.372-0400 m31100| 2015-07-09T14:16:16.365-0400 I SHARDING [conn37] request split points lookup for chunk db65.coll65 { : 12.0 } -->> { : 17.0 } 
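All of the slow findAndModify entries above share one shape: each worker thread of the findAndModify_update_grow workload looks up its own document by tid, sorts on length ascending, and grows it. A minimal mongo-shell sketch of that command as reconstructed from the log; the padding string is truncated in the log output ("xxxx..."), so bigString below is a stand-in of assumed size:

    // Sketch of the per-iteration findAndModify issued by the
    // findAndModify_update_grow workload, reconstructed from the log.
    // Assumes a shell connected to a mongos; bigString stands in for the
    // truncated "xxxx..." padding whose real length the log does not show.
    var bigString = new Array(1024 * 1024).join('x');
    var res = db.getSiblingDB('db65').runCommand({
        findandmodify: 'coll65',
        query: { tid: 1.0 },          // each thread targets its own tid
        sort: { length: 1.0 },        // grow the smallest matching doc first
        update: {
            $set: { findAndModify_update_grow: bigString },
            $mul: { length: 3.0 }     // the length field triples per update
        },
        new: true                     // return the post-update document
    });

The ~1.4 MB reslen values above are consistent with new: true returning the freshly grown document on every call.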
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.372-0400 m31100| 2015-07-09T14:16:16.365-0400 I SHARDING [conn132] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.372-0400 m31100| 2015-07-09T14:16:16.365-0400 I SHARDING [conn34] request split points lookup for chunk db65.coll65 { : 1.0 } -->> { : 7.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.373-0400 m31100| 2015-07-09T14:16:16.365-0400 I COMMAND [conn38] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:198 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1197966 } } } protocol:op_command 698ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.373-0400 m31100| 2015-07-09T14:16:16.365-0400 I SHARDING [conn35] splitChunk accepted at version 1|9||559eba62ca4787b9985d1ea0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.374-0400 m31100| 2015-07-09T14:16:16.366-0400 I COMMAND [conn36] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:198 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1115520 } } } protocol:op_command 616ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.374-0400 m31100| 2015-07-09T14:16:16.366-0400 I COMMAND [conn15] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1232332 } } } protocol:op_command 733ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.375-0400 m31100| 2015-07-09T14:16:16.366-0400 I COMMAND [conn32] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:156 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 244562 } } } protocol:op_command 246ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.375-0400 m31100| 2015-07-09T14:16:16.366-0400 I COMMAND [conn132] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:198 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { 
r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 422106 } } } protocol:op_command 423ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.375-0400 m31100| 2015-07-09T14:16:16.366-0400 I COMMAND [conn34] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:198 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1017680 } } } protocol:op_command 517ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.377-0400 m31100| 2015-07-09T14:16:16.367-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 14.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.377-0400 m31100| 2015-07-09T14:16:16.367-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.380-0400 m31100| 2015-07-09T14:16:16.367-0400 I QUERY [conn183] query db65.coll65 query: { query: { tid: 10.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:155642 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1277850 } } } 988ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.380-0400 m31100| 2015-07-09T14:16:16.368-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.381-0400 m31100| 2015-07-09T14:16:16.368-0400 I COMMAND [conn37] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1067974 } } } protocol:op_command 568ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.381-0400 m31100| 2015-07-09T14:16:16.368-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { 
tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.381-0400 m31100| 2015-07-09T14:16:16.369-0400 I SHARDING [conn39] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.382-0400 m31100| 2015-07-09T14:16:16.370-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.382-0400 m31100| 2015-07-09T14:16:16.374-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 14.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.383-0400 m31100| 2015-07-09T14:16:16.377-0400 I COMMAND [conn39] command db65.coll65 command: splitVector { splitVector: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, maxChunkSizeBytes: 13107200, maxSplitPoints: 0, maxChunkObjects: 250000 } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:177 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 460049 } } } protocol:op_command 467ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.383-0400 m31100| 2015-07-09T14:16:16.382-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.387-0400 m31100| 2015-07-09T14:16:16.384-0400 W SHARDING [conn15] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
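The "request split points lookup" lines are the autosplit path starting up: a mongos notices a chunk has absorbed enough data and asks the shard primary to propose split keys via splitVector, which reads the chunk but modifies nothing. A sketch with values copied from the { tid: 12.0 } -->> { tid: 17.0 } entry above; the large timeAcquiringMicros figures in those entries suggest most of each command's runtime was spent waiting on the collection lock behind the findAndModify writes:

    // splitVector only proposes split keys; values taken from the log
    // entry for the { tid: 12.0 } -->> { tid: 17.0 } chunk.
    db.getSiblingDB('db65').runCommand({
        splitVector: 'db65.coll65',
        keyPattern: { tid: 1.0 },
        min: { tid: 12.0 },
        max: { tid: 17.0 },
        maxChunkSizeBytes: 13107200,  // 12.5 MB target chunk size
        maxSplitPoints: 0,            // 0 = no cap on proposed points
        maxChunkObjects: 250000
    });
    // Returns the proposed keys, e.g. [ { tid: 13.0 }, { tid: 14.0 },
    // { tid: 16.0 } ], which feed the splitChunk requests that follow.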
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.387-0400 m30999| 2015-07-09T14:16:16.384-0400 W SHARDING [conn424] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 14.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.388-0400 m31100| 2015-07-09T14:16:16.384-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.397-0400 m31100| 2015-07-09T14:16:16.396-0400 W SHARDING [conn38] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.399-0400 m30999| 2015-07-09T14:16:16.397-0400 W SHARDING [conn428] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.403-0400 m31100| 2015-07-09T14:16:16.400-0400 W SHARDING [conn36] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.408-0400 m30998| 2015-07-09T14:16:16.403-0400 W SHARDING [conn423] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.409-0400 m30998| 2015-07-09T14:16:16.403-0400 W SHARDING [conn430] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.410-0400 m31100| 2015-07-09T14:16:16.403-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.410-0400 m31100| 2015-07-09T14:16:16.403-0400 W SHARDING [conn40] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.410-0400 m30999| 2015-07-09T14:16:16.404-0400 W SHARDING [conn431] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.411-0400 m31100| 2015-07-09T14:16:16.403-0400 W SHARDING [conn34] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.411-0400 m31100| 2015-07-09T14:16:16.405-0400 W SHARDING [conn132] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.414-0400 m30999| 2015-07-09T14:16:16.406-0400 W SHARDING [conn429] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.414-0400 m30998| 2015-07-09T14:16:16.407-0400 W SHARDING [conn427] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 1.0 }, max: { tid: 7.0 }, from: "test-rs0", splitKeys: [ { tid: 2.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.414-0400 m31100| 2015-07-09T14:16:16.408-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.416-0400 m31100| 2015-07-09T14:16:16.409-0400 W SHARDING [conn37] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.419-0400 m31100| 2015-07-09T14:16:16.412-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
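The splitChunk failed warnings above are benign contention, not data loss: every mongos that routed a write tried to autosplit the same collection at once, and only the caller holding the collection's distributed lock may proceed, so the losers get code 125 and retry on a later write. The request each shard primary receives looks like this (values from the { tid: 17.0 } -->> MaxKey split in the log):

    // splitChunk applies previously computed split keys; it is an admin
    // command on the shard primary. Only one concurrent caller wins the
    // distributed lock and performs the split.
    db.getSiblingDB('admin').runCommand({
        splitChunk: 'db65.coll65',
        keyPattern: { tid: 1.0 },
        min: { tid: 17.0 },
        max: { tid: MaxKey },
        from: 'test-rs0',
        splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ],
        configdb: 'test-configRS/bs-osx108-8:29000',
        epoch: ObjectId('559eba62ca4787b9985d1ea0')
    });
    // Losing callers return { ok: 0.0, errmsg: "could not acquire
    // collection lock for db65.coll65 ...", code: 125 } as logged above.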
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.419-0400 m30999| 2015-07-09T14:16:16.411-0400 W SHARDING [conn425] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 12.0 }, max: { tid: 17.0 }, from: "test-rs0", splitKeys: [ { tid: 13.0 }, { tid: 14.0 }, { tid: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.419-0400 m30998| 2015-07-09T14:16:16.412-0400 W SHARDING [conn422] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.565-0400 m31100| 2015-07-09T14:16:16.555-0400 I QUERY [conn186] query db65.coll65 query: { query: { _id: ObjectId('559eba6beac5440bf8d3f37e') }, $showDiskLoc: true } planSummary: IXSCAN { _id: 1 } cursorid:5254819025040 ntoreturn:0 ntoskip:0 nscanned:1 nscannedObjects:1 keyUpdates:0 writeConflicts:0 numYields:0 nreturned:1 reslen:1399802 locks:{ Global: { acquireCount: { r: 2 } }, Database: { acquireCount: { r: 1 } }, Collection: { acquireCount: { r: 1 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 224220 } } } 183ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.579-0400 m30998| 2015-07-09T14:16:16.571-0400 I NETWORK [conn428] end connection 127.0.0.1:63974 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.579-0400 m30999| 2015-07-09T14:16:16.571-0400 I NETWORK [conn426] end connection 127.0.0.1:63969 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.586-0400 m31100| 2015-07-09T14:16:16.581-0400 I QUERY [conn185] query db65.coll65 query: { query: { tid: 13.0 }, $showDiskLoc: true, orderby: { length: 1.0 } } planSummary: IXSCAN { tid: 1.0 }, IXSCAN { tid: 1.0 } ntoreturn:1 ntoskip:0 nscanned:2 nscannedObjects:2 scanAndOrder:1 cursorExhausted:1 keyUpdates:0 writeConflicts:0 numYields:2 nreturned:1 reslen:466682 locks:{ Global: { acquireCount: { r: 6 } }, Database: { acquireCount: { r: 3 } }, Collection: { acquireCount: { r: 3 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 165102 } } } 210ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.589-0400 m31100| 2015-07-09T14:16:16.588-0400 I COMMAND [conn180] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 0.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 1205772 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1004ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.600-0400 m31100| 2015-07-09T14:16:16.599-0400 I COMMAND [conn49] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 7.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 46369 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1112ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.615-0400 m31100| 2015-07-09T14:16:16.613-0400 I COMMAND [conn181] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 9.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 1203978 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1095ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.628-0400 m31100| 2015-07-09T14:16:16.624-0400 I COMMAND [conn184] command db65.$cmd command: findAndModify { findandmodify: "coll65", query: { tid: 16.0 }, sort: { length: 1.0 }, update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." }, $mul: { length: 3.0 } }, new: true } update: { $set: { findAndModify_update_grow: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx..." 
}, $mul: { length: 3.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:1399899 locks:{ Global: { acquireCount: { r: 3, w: 3 } }, Database: { acquireCount: { w: 3 } }, Collection: { acquireCount: { w: 2 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 1222996 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 1109ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.628-0400 m31100| 2015-07-09T14:16:16.625-0400 I SHARDING [conn35] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:16.625-0400-559eba70792e00bb67274a96", server: "bs-osx108-8", clientAddr: "127.0.0.1:62637", time: new Date(1436465776625), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: 17.0 }, max: { tid: MaxKey } }, number: 1, of: 3, chunk: { min: { tid: 17.0 }, max: { tid: 18.0 }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.647-0400 m31100| 2015-07-09T14:16:16.642-0400 I SHARDING [conn39] request split points lookup for chunk db65.coll65 { : 7.0 } -->> { : 12.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.650-0400 m31100| 2015-07-09T14:16:16.644-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.650-0400 m31100| 2015-07-09T14:16:16.647-0400 W SHARDING [conn39] could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db65.coll65 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.651-0400 m30998| 2015-07-09T14:16:16.647-0400 W SHARDING [conn426] splitChunk failed - cmd: { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 7.0 }, max: { tid: 12.0 }, from: "test-rs0", splitKeys: [ { tid: 8.0 }, { tid: 9.0 }, { tid: 11.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db65.coll65 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.686-0400 m30998| 2015-07-09T14:16:16.682-0400 I NETWORK [conn431] end connection 127.0.0.1:63980 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.695-0400 m31100| 2015-07-09T14:16:16.694-0400 I SHARDING [conn35] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:16.694-0400-559eba70792e00bb67274a97", server: "bs-osx108-8", clientAddr: "127.0.0.1:62637", time: new Date(1436465776694), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: 17.0 }, max: { tid: MaxKey } }, number: 2, of: 3, chunk: { min: { tid: 18.0 }, max: { tid: 19.0 }, lastmod: Timestamp 1000|11, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.748-0400 m31100| 2015-07-09T14:16:16.747-0400 I SHARDING [conn35] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:16.747-0400-559eba70792e00bb67274a98", server: "bs-osx108-8", clientAddr: "127.0.0.1:62637", time: new Date(1436465776747), what: "multi-split", ns: "db65.coll65", details: { before: { min: { tid: 17.0 }, max: { tid: MaxKey } }, number: 3, of: 3, chunk: { min: { tid: 19.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|12, lastmodEpoch: ObjectId('559eba62ca4787b9985d1ea0') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:16.824-0400 m31100| 2015-07-09T14:16:16.822-0400 I SHARDING [conn35] distributed lock 'db65.coll65/bs-osx108-8:31100:1436464536:197041335' unlocked. 
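The three multi-split metadata events above record the winning split's outcome: the former { tid: 17.0 } -->> MaxKey chunk now exists as three chunks with lastmod versions 1|10 through 1|12. A sketch of reading that result back, assuming a shell connected to a mongos and the ns-keyed config.chunks schema of this server generation:

    // Expected ranges for the tail of db65.coll65 after the multi-split:
    //   { tid: 17.0 } -->> { tid: 18.0 }    lastmod 1|10
    //   { tid: 18.0 } -->> { tid: 19.0 }    lastmod 1|11
    //   { tid: 19.0 } -->> { tid: MaxKey }  lastmod 1|12
    db.getSiblingDB('config').chunks
      .find({ ns: 'db65.coll65', 'min.tid': { $gte: 17.0 } })
      .sort({ min: 1 });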
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.061-0400 m31100| 2015-07-09T14:16:16.823-0400 I COMMAND [conn35] command db65.coll65 command: splitChunk { splitChunk: "db65.coll65", keyPattern: { tid: 1.0 }, min: { tid: 17.0 }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba62ca4787b9985d1ea0') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 1601607 } } } protocol:op_command 1318ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.061-0400 m30998| 2015-07-09T14:16:16.829-0400 I SHARDING [conn424] ChunkManager: time to load chunks for db65.coll65: 1ms sequenceNumber: 81 version: 1|12||559eba62ca4787b9985d1ea0 based on: 1|5||559eba62ca4787b9985d1ea0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.062-0400 m30998| 2015-07-09T14:16:16.830-0400 I SHARDING [conn424] autosplitted db65.coll65 shard: ns: db65.coll65, shard: test-rs0, lastmod: 1|5||000000000000000000000000, min: { tid: 17.0 }, max: { tid: MaxKey } into 3 (splitThreshold 11796480)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.062-0400 m30999| 2015-07-09T14:16:16.831-0400 I NETWORK [conn424] end connection 127.0.0.1:63964 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.062-0400 m30998| 2015-07-09T14:16:16.861-0400 I NETWORK [conn422] end connection 127.0.0.1:63963 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.062-0400 m30999| 2015-07-09T14:16:16.862-0400 I NETWORK [conn429] end connection 127.0.0.1:63975 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.063-0400 m30998| 2015-07-09T14:16:16.901-0400 I NETWORK [conn423] end connection 127.0.0.1:63965 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.063-0400 m30999| 2015-07-09T14:16:16.912-0400 I NETWORK [conn428] end connection 127.0.0.1:63973 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.063-0400 m30999| 2015-07-09T14:16:16.938-0400 I NETWORK [conn431] end connection 127.0.0.1:63979 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.063-0400 m30998| 2015-07-09T14:16:16.970-0400 I NETWORK [conn427] end connection 127.0.0.1:63971 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.063-0400 m30999| 2015-07-09T14:16:16.991-0400 I NETWORK [conn430] end connection 127.0.0.1:63978 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.064-0400 m30999| 2015-07-09T14:16:17.007-0400 I NETWORK [conn425] end connection 127.0.0.1:63967 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.064-0400 m30998| 2015-07-09T14:16:17.048-0400 I NETWORK [conn426] end connection 127.0.0.1:63970 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.065-0400 m30998| 2015-07-09T14:16:17.063-0400 I NETWORK [conn430] end connection 127.0.0.1:63977 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.065-0400 m30998| 2015-07-09T14:16:17.064-0400 I NETWORK [conn429] end connection 127.0.0.1:63976 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.076-0400 m30999| 2015-07-09T14:16:17.075-0400 I NETWORK [conn422] end connection 127.0.0.1:63961 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.099-0400 m30998| 2015-07-09T14:16:17.099-0400 I NETWORK [conn424] end connection 127.0.0.1:63966 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.176-0400 m30998| 2015-07-09T14:16:17.175-0400 I NETWORK [conn425] end connection 127.0.0.1:63968 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.193-0400 m30999| 2015-07-09T14:16:17.192-0400 I NETWORK [conn427] end connection 127.0.0.1:63972 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.216-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400 jstests/concurrency/fsm_workloads/findAndModify_update_grow.js: Workload completed in 14251 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400 m30999| 2015-07-09T14:16:17.217-0400 I COMMAND [conn1] DROP: db65.coll65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.217-0400 m30999| 2015-07-09T14:16:17.217-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:17.217-0400-559eba71ca4787b9985d1ea2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465777217), what: "dropCollection.start", ns: "db65.coll65", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.273-0400 m30999| 2015-07-09T14:16:17.273-0400 I SHARDING [conn1] distributed lock 'db65.coll65/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba71ca4787b9985d1ea3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.277-0400 m31100| 2015-07-09T14:16:17.273-0400 I COMMAND [conn37] CMD: drop db65.coll65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.397-0400 m31100| 2015-07-09T14:16:17.396-0400 I COMMAND [conn37] command db65.coll65 command: drop { drop: "coll65" } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:119 locks:{ Global: { acquireCount: { r: 2, w: 2 } }, Database: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 1 } }, oplog: { acquireCount: { w: 1 } } } protocol:op_command 122ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.400-0400 m31101| 2015-07-09T14:16:17.399-0400 I COMMAND [repl writer worker 8] CMD: drop db65.coll65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.401-0400 m31200| 2015-07-09T14:16:17.401-0400 I COMMAND [conn63] CMD: drop db65.coll65
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.457-0400 m31100| 2015-07-09T14:16:17.456-0400 I SHARDING [conn37] remotely refreshing metadata for db65.coll65 with requested shard version 0|0||000000000000000000000000, current shard version is 1|12||559eba62ca4787b9985d1ea0, current metadata version is 1|12||559eba62ca4787b9985d1ea0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.458-0400 m31100| 2015-07-09T14:16:17.458-0400 W SHARDING [conn37] no chunks found when reloading db65.coll65, previous version was 0|0||559eba62ca4787b9985d1ea0, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.458-0400 m31100| 2015-07-09T14:16:17.458-0400 I SHARDING
[conn37] dropping metadata for db65.coll65 at shard version 1|12||559eba62ca4787b9985d1ea0, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.460-0400 m30999| 2015-07-09T14:16:17.460-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:17.460-0400-559eba71ca4787b9985d1ea4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465777460), what: "dropCollection", ns: "db65.coll65", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.515-0400 m30999| 2015-07-09T14:16:17.515-0400 I SHARDING [conn1] distributed lock 'db65.coll65/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.571-0400 m30999| 2015-07-09T14:16:17.571-0400 I COMMAND [conn1] DROP DATABASE: db65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.571-0400 m30999| 2015-07-09T14:16:17.571-0400 I SHARDING [conn1] DBConfig::dropDatabase: db65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.572-0400 m30999| 2015-07-09T14:16:17.571-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:17.571-0400-559eba71ca4787b9985d1ea5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465777571), what: "dropDatabase.start", ns: "db65", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.677-0400 m30999| 2015-07-09T14:16:17.677-0400 I SHARDING [conn1] DBConfig::dropDatabase: db65 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.678-0400 m31100| 2015-07-09T14:16:17.677-0400 I COMMAND [conn157] dropDatabase db65 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.678-0400 m31100| 2015-07-09T14:16:17.677-0400 I COMMAND [conn157] dropDatabase db65 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.679-0400 m30999| 2015-07-09T14:16:17.678-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:17.678-0400-559eba71ca4787b9985d1ea6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465777678), what: "dropDatabase", ns: "db65", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.679-0400 m31101| 2015-07-09T14:16:17.678-0400 I COMMAND [repl writer worker 15] dropDatabase db65 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:17.679-0400 m31101| 2015-07-09T14:16:17.678-0400 I COMMAND [repl writer worker 15] dropDatabase db65 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:24.929-0400 m31102| 2015-07-09T14:16:24.928-0400 I COMMAND [repl writer worker 3] CMD: drop db65.coll65 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:24.980-0400 m31102| 2015-07-09T14:16:24.979-0400 I COMMAND [repl writer worker 10] dropDatabase db65 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:24.980-0400 m31102| 2015-07-09T14:16:24.979-0400 I COMMAND [repl writer worker 10] dropDatabase db65 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:24.992-0400 m31100| 2015-07-09T14:16:24.991-0400 I COMMAND [conn1] command test.$cmd command: insert { insert: "fsm_teardown", documents: [ { _id: ObjectId('559eba71eac5440bf8d3f38e'), a: 1.0 } ], ordered: true, writeConcern: { w: 3.0, wtimeout: 300000.0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { 
w: 2 } }, oplog: { acquireCount: { w: 2 } } } protocol:op_command 7257ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:24.992-0400 m31100| 2015-07-09T14:16:24.992-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:24.998-0400 m31102| 2015-07-09T14:16:24.998-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.000-0400 m31101| 2015-07-09T14:16:24.999-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.035-0400 m31200| 2015-07-09T14:16:25.034-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.038-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.038-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.038-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.038-0400 jstests/concurrency/fsm_workloads/findAndModify_upsert.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.039-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.039-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.039-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.039-0400 m31201| 2015-07-09T14:16:25.038-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.039-0400 m31202| 2015-07-09T14:16:25.038-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.047-0400 m30999| 2015-07-09T14:16:25.046-0400 I SHARDING [conn1] distributed lock 'db66/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba79ca4787b9985d1ea7
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.051-0400 m30999| 2015-07-09T14:16:25.050-0400 I SHARDING [conn1] Placing [db66] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.051-0400 m30999| 2015-07-09T14:16:25.050-0400 I SHARDING [conn1] Enabling sharding for database [db66] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.105-0400 m30999| 2015-07-09T14:16:25.104-0400 I SHARDING [conn1] distributed lock 'db66/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.126-0400 m31100| 2015-07-09T14:16:25.126-0400 I INDEX [conn144] build index on: db66.coll66 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db66.coll66" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.127-0400 m31100| 2015-07-09T14:16:25.126-0400 I INDEX [conn144] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.141-0400 m31100| 2015-07-09T14:16:25.140-0400 I INDEX [conn144] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.144-0400 m30999| 2015-07-09T14:16:25.143-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db66.coll66", key: { tid: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.146-0400 m30999| 2015-07-09T14:16:25.145-0400 I SHARDING [conn1] distributed lock 'db66.coll66/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba79ca4787b9985d1ea8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.147-0400 m30999| 2015-07-09T14:16:25.147-0400 I SHARDING [conn1] enable sharding on: db66.coll66 with shard key: { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.148-0400 m30999| 2015-07-09T14:16:25.147-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:25.147-0400-559eba79ca4787b9985d1ea9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465785147), what: "shardCollection.start", ns: "db66.coll66", details: { shardKey: { tid: 1.0 }, collection: "db66.coll66", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.155-0400 m31102| 2015-07-09T14:16:25.154-0400 I INDEX [repl writer worker 6] build index on: db66.coll66 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db66.coll66" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.155-0400 m31102| 2015-07-09T14:16:25.154-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.157-0400 m31101| 2015-07-09T14:16:25.156-0400 I INDEX [repl writer worker 11] build index on: db66.coll66 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db66.coll66" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.157-0400 m31101| 2015-07-09T14:16:25.156-0400 I INDEX [repl writer worker 11] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.164-0400 m31101| 2015-07-09T14:16:25.163-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.164-0400 m31102| 2015-07-09T14:16:25.163-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.200-0400 m30999| 2015-07-09T14:16:25.200-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db66.coll66 using new epoch 559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.256-0400 m30999| 2015-07-09T14:16:25.255-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db66.coll66: 0ms sequenceNumber: 288 version: 1|0||559eba79ca4787b9985d1eaa based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.312-0400 m30999| 2015-07-09T14:16:25.311-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db66.coll66: 0ms sequenceNumber: 289 version: 1|0||559eba79ca4787b9985d1eaa based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.314-0400 m31100| 2015-07-09T14:16:25.313-0400 I SHARDING [conn184] remotely refreshing metadata for db66.coll66 with requested shard version 1|0||559eba79ca4787b9985d1eaa, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.315-0400 m31100| 2015-07-09T14:16:25.315-0400 I SHARDING [conn184] collection db66.coll66 was previously unsharded, new metadata loaded with shard version 1|0||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.315-0400 m31100| 2015-07-09T14:16:25.315-0400 I SHARDING [conn184] collection version was loaded at version 1|0||559eba79ca4787b9985d1eaa, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.316-0400 m30999| 2015-07-09T14:16:25.315-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:25.315-0400-559eba79ca4787b9985d1eab", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465785315), what: "shardCollection", ns: "db66.coll66", details: { version: "1|0||559eba79ca4787b9985d1eaa" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.371-0400 m30999| 2015-07-09T14:16:25.370-0400 I SHARDING [conn1] distributed lock 'db66.coll66/bs-osx108-8:30999:1436464534:16807' unlocked.
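The db66 setup above is the scaffolding the runner repeats for every workload: take the database lock, place the database on a primary shard, build the shard-key index, then shard the collection through mongos. An equivalent shell sketch of what the log records:

    // Equivalent shell steps for the db66.coll66 setup logged above,
    // run against a mongos such as m30999.
    sh.enableSharding('db66');                                 // "Enabling sharding for database [db66]"
    db.getSiblingDB('db66').coll66.ensureIndex({ tid: 1.0 });  // builds tid_1, replicated to m31101/m31102
    sh.shardCollection('db66.coll66', { tid: 1.0 });           // "CMD: shardcollection", one initial chunk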
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.372-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.527-0400 m30998| 2015-07-09T14:16:25.527-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63985 #432 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.539-0400 m30999| 2015-07-09T14:16:25.539-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63986 #432 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.541-0400 m30998| 2015-07-09T14:16:25.541-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63987 #433 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.551-0400 m30999| 2015-07-09T14:16:25.550-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63988 #433 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.592-0400 m30999| 2015-07-09T14:16:25.592-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63989 #434 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.592-0400 m30998| 2015-07-09T14:16:25.592-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63990 #434 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.600-0400 m30999| 2015-07-09T14:16:25.600-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63991 #435 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.601-0400 m30999| 2015-07-09T14:16:25.600-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63992 #436 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.610-0400 m30999| 2015-07-09T14:16:25.610-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63993 #437 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.617-0400 m30998| 2015-07-09T14:16:25.617-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63994 #435 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.618-0400 m30999| 2015-07-09T14:16:25.617-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63995 #438 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.618-0400 m30998| 2015-07-09T14:16:25.618-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63996 #436 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.619-0400 m30999| 2015-07-09T14:16:25.619-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63998 #439 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.619-0400 m30998| 2015-07-09T14:16:25.619-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63997 #437 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.625-0400 m30999| 2015-07-09T14:16:25.625-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:63999 #440 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.626-0400 m30998| 2015-07-09T14:16:25.626-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64000 #438 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.626-0400 m30998| 2015-07-09T14:16:25.626-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64001 #439 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.630-0400 m30999| 2015-07-09T14:16:25.626-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64003 #441 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.630-0400 m30998| 2015-07-09T14:16:25.627-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64002 #440 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.630-0400 m30998| 2015-07-09T14:16:25.630-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64004 #441 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.635-0400 setting random seed: 9097473830915
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.635-0400 setting random seed: 865046707913
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.635-0400 setting random seed: 7754223779775
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.636-0400 setting random seed: 3851553322747
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.636-0400 setting random seed: 4397526606917
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.636-0400 setting random seed: 1242747404612
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.638-0400 m30998| 2015-07-09T14:16:25.637-0400 I SHARDING [conn434] ChunkManager: time to load chunks for db66.coll66: 0ms sequenceNumber: 82 version: 1|0||559eba79ca4787b9985d1eaa based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.639-0400 setting random seed: 4386459784582
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.640-0400 setting random seed: 6855639046989
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.642-0400 setting random seed: 8840026664547
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.643-0400 setting random seed: 4328220235183
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.644-0400 setting random seed: 2978396392427
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.645-0400 setting random seed: 2963398923166
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.645-0400 setting random seed: 455344701185
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.646-0400 setting random seed: 7908190833404
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.647-0400 setting random seed: 9229716872796
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.647-0400 setting random seed: 5845724842511
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.647-0400 setting random seed: 2725516404025
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.648-0400 setting random seed: 5222280300222
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.649-0400 setting random seed: 3229633802548
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.650-0400 setting random seed: 3056725556962
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.668-0400 m31100| 2015-07-09T14:16:25.667-0400 I SHARDING [conn132] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.690-0400 m31100| 2015-07-09T14:16:25.688-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.690-0400 m31100| 2015-07-09T14:16:25.689-0400 I SHARDING [conn132] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.690-0400 m31100| 2015-07-09T14:16:25.689-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.690-0400 m31100| 2015-07-09T14:16:25.689-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.697-0400 m31100| 2015-07-09T14:16:25.689-0400 I SHARDING [conn39] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.697-0400 m31100| 2015-07-09T14:16:25.690-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.697-0400 m31100| 2015-07-09T14:16:25.693-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.698-0400 m31100| 2015-07-09T14:16:25.697-0400 I SHARDING [conn35] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.698-0400 m31100| 2015-07-09T14:16:25.697-0400 I SHARDING [conn36] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.706-0400 m31100| 2015-07-09T14:16:25.705-0400 I SHARDING [conn32] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.712-0400 m31100| 2015-07-09T14:16:25.710-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.712-0400 m31100| 2015-07-09T14:16:25.710-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.713-0400 m31100| 2015-07-09T14:16:25.712-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.713-0400 m31100| 2015-07-09T14:16:25.712-0400 I SHARDING [conn40] could not acquire lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.713-0400 m31100| 2015-07-09T14:16:25.712-0400 I SHARDING [conn40] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.714-0400 m31100| 2015-07-09T14:16:25.712-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.715-0400 m31100| 2015-07-09T14:16:25.712-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.715-0400 m31100| 2015-07-09T14:16:25.713-0400 I SHARDING [conn132] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba79792e00bb67274a9a
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.715-0400 m31100| 2015-07-09T14:16:25.714-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.716-0400 m31100| 2015-07-09T14:16:25.715-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.723-0400 m30999| 2015-07-09T14:16:25.719-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.723-0400 m30999| 2015-07-09T14:16:25.720-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.726-0400 m31100| 2015-07-09T14:16:25.717-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.727-0400 m31100| 2015-07-09T14:16:25.718-0400 I SHARDING [conn132] remotely refreshing metadata for db66.coll66 based on current shard version 1|0||559eba79ca4787b9985d1eaa, current metadata version is 1|0||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.728-0400 m31100| 2015-07-09T14:16:25.719-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.729-0400 m31100| 2015-07-09T14:16:25.719-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.729-0400 m31100| 2015-07-09T14:16:25.722-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.729-0400 m31100| 2015-07-09T14:16:25.723-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.730-0400 m31100| 2015-07-09T14:16:25.724-0400 W SHARDING [conn39] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.730-0400 m30999| 2015-07-09T14:16:25.724-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.731-0400 m30999| 2015-07-09T14:16:25.724-0400 W SHARDING [conn433] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.731-0400 m31100| 2015-07-09T14:16:25.725-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.731-0400 m31100| 2015-07-09T14:16:25.726-0400 W SHARDING [conn36] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.735-0400 m30998| 2015-07-09T14:16:25.728-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.736-0400 m31100| 2015-07-09T14:16:25.731-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.736-0400 m31100| 2015-07-09T14:16:25.731-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.736-0400 m30999| 2015-07-09T14:16:25.732-0400 W SHARDING [conn437] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.737-0400 m30998| 2015-07-09T14:16:25.732-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.739-0400 m31100| 2015-07-09T14:16:25.734-0400 W SHARDING [conn35] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.739-0400 m31100| 2015-07-09T14:16:25.734-0400 W SHARDING [conn32] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.740-0400 m30998| 2015-07-09T14:16:25.737-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.740-0400 m30998| 2015-07-09T14:16:25.737-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.741-0400 m31100| 2015-07-09T14:16:25.740-0400 I SHARDING [conn132] metadata of collection db66.coll66 already up to date (shard version : 1|0||559eba79ca4787b9985d1eaa, took 4ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.742-0400 m31100| 2015-07-09T14:16:25.741-0400 I SHARDING [conn132] splitChunk accepted at version 1|0||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.754-0400 m31100| 2015-07-09T14:16:25.753-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:25.753-0400-559eba79792e00bb67274a9c", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465785753), what: "multi-split", ns: "db66.coll66", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 3, chunk: { min: { tid: MinKey }, max: { tid: 1.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559eba79ca4787b9985d1eaa') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.760-0400 m31100| 2015-07-09T14:16:25.759-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.761-0400 m31100| 2015-07-09T14:16:25.759-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.762-0400 m31100| 2015-07-09T14:16:25.759-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.762-0400 m31100| 2015-07-09T14:16:25.759-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.762-0400 m31100| 2015-07-09T14:16:25.759-0400 I SHARDING [conn35] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.763-0400 m31100| 2015-07-09T14:16:25.760-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.763-0400 m31100| 2015-07-09T14:16:25.760-0400 I SHARDING [conn32] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.763-0400 m31100| 2015-07-09T14:16:25.760-0400 I SHARDING [conn36] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.763-0400 m31100| 2015-07-09T14:16:25.760-0400 I SHARDING [conn39] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.770-0400 m31100| 2015-07-09T14:16:25.769-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.776-0400 m31100| 2015-07-09T14:16:25.769-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.776-0400 m31100| 2015-07-09T14:16:25.770-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.777-0400 m31100| 2015-07-09T14:16:25.770-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.777-0400 m31100| 2015-07-09T14:16:25.771-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.777-0400 m30999| 2015-07-09T14:16:25.771-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.778-0400 m31100| 2015-07-09T14:16:25.771-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.778-0400 m31100| 2015-07-09T14:16:25.771-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.779-0400 m30999| 2015-07-09T14:16:25.771-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.779-0400 m31100| 2015-07-09T14:16:25.771-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.779-0400 m31100| 2015-07-09T14:16:25.771-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.780-0400 m30999| 2015-07-09T14:16:25.774-0400 W SHARDING [conn436] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.780-0400 m30999| 2015-07-09T14:16:25.774-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.780-0400 m30998| 2015-07-09T14:16:25.775-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.781-0400 m31100| 2015-07-09T14:16:25.771-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.781-0400 m31100| 2015-07-09T14:16:25.772-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.781-0400 m31100| 2015-07-09T14:16:25.772-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.781-0400 m31100| 2015-07-09T14:16:25.773-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.782-0400 m31100| 2015-07-09T14:16:25.773-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.782-0400 m31100| 2015-07-09T14:16:25.775-0400 W SHARDING [conn36] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.782-0400 m31100| 2015-07-09T14:16:25.775-0400 W SHARDING [conn39] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.782-0400 m31100| 2015-07-09T14:16:25.776-0400 W SHARDING [conn35] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.783-0400 m30999| 2015-07-09T14:16:25.776-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.783-0400 m30998| 2015-07-09T14:16:25.776-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.783-0400 m30998| 2015-07-09T14:16:25.776-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.784-0400 m31100| 2015-07-09T14:16:25.781-0400 W SHARDING [conn32] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.784-0400 m30998| 2015-07-09T14:16:25.782-0400 W SHARDING [conn436] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 12.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.785-0400 m31100| 2015-07-09T14:16:25.783-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.785-0400 m31100| 2015-07-09T14:16:25.784-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.785-0400 m31100| 2015-07-09T14:16:25.784-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.785-0400 m31100| 2015-07-09T14:16:25.785-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.791-0400 m31100| 2015-07-09T14:16:25.786-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.792-0400 m31100| 2015-07-09T14:16:25.787-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.792-0400 m31100| 2015-07-09T14:16:25.787-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.793-0400 m31100| 2015-07-09T14:16:25.788-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.794-0400 m31100| 2015-07-09T14:16:25.788-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.794-0400 m31100| 2015-07-09T14:16:25.788-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.795-0400 m30999| 2015-07-09T14:16:25.788-0400 W SHARDING [conn436] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.795-0400 m31100| 2015-07-09T14:16:25.789-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.795-0400 m31100| 2015-07-09T14:16:25.789-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.795-0400 m30999| 2015-07-09T14:16:25.789-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.796-0400 m31100| 2015-07-09T14:16:25.789-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.797-0400 m30999| 2015-07-09T14:16:25.789-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.797-0400 m31100| 2015-07-09T14:16:25.790-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.798-0400 m30999| 2015-07-09T14:16:25.790-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.798-0400 m31100| 2015-07-09T14:16:25.792-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.798-0400 m30999| 2015-07-09T14:16:25.793-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.798-0400 m31100| 2015-07-09T14:16:25.796-0400 I SHARDING [conn32] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.803-0400 m31100| 2015-07-09T14:16:25.798-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.803-0400 m31100| 2015-07-09T14:16:25.799-0400 I SHARDING [conn35] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.803-0400 m31100| 2015-07-09T14:16:25.802-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.803-0400 m31100| 2015-07-09T14:16:25.802-0400 I SHARDING [conn39] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.804-0400 m31100| 2015-07-09T14:16:25.803-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.808-0400 m31100| 2015-07-09T14:16:25.806-0400 I SHARDING [conn36] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.811-0400 m31100| 2015-07-09T14:16:25.807-0400 W SHARDING [conn32] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.821-0400 m30998| 2015-07-09T14:16:25.807-0400 W SHARDING [conn437] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.823-0400 m30998| 2015-07-09T14:16:25.808-0400 W SHARDING [conn440] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.823-0400 m31100| 2015-07-09T14:16:25.807-0400 W SHARDING [conn39] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.823-0400 m31100| 2015-07-09T14:16:25.807-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.823-0400 m31100| 2015-07-09T14:16:25.808-0400 W SHARDING [conn35] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.824-0400 m30998| 2015-07-09T14:16:25.808-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.824-0400 m31100| 2015-07-09T14:16:25.810-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.824-0400 m31100| 2015-07-09T14:16:25.811-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.824-0400 m31100| 2015-07-09T14:16:25.812-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.824-0400 m31100| 2015-07-09T14:16:25.813-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.824-0400 m31100| 2015-07-09T14:16:25.814-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.825-0400 m31100| 2015-07-09T14:16:25.814-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.825-0400 m30999| 2015-07-09T14:16:25.817-0400 W SHARDING [conn433] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.825-0400 m31100| 2015-07-09T14:16:25.816-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.825-0400 m30998| 2015-07-09T14:16:25.817-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.826-0400 m31100| 2015-07-09T14:16:25.816-0400 W SHARDING [conn36] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.826-0400 m31100| 2015-07-09T14:16:25.817-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.826-0400 m31100| 2015-07-09T14:16:25.817-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.826-0400 m31100| 2015-07-09T14:16:25.818-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.827-0400 m31100| 2015-07-09T14:16:25.819-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.827-0400 m30999| 2015-07-09T14:16:25.819-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.833-0400 m31100| 2015-07-09T14:16:25.819-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.834-0400 m30999| 2015-07-09T14:16:25.819-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.834-0400 m31100| 2015-07-09T14:16:25.819-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:25.819-0400-559eba79792e00bb67274a9d", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465785819), what: "multi-split", ns: "db66.coll66", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 3, chunk: { min: { tid: 1.0 }, max: { tid: 14.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559eba79ca4787b9985d1eaa') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.834-0400 m31100| 2015-07-09T14:16:25.819-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.835-0400 m31100| 2015-07-09T14:16:25.819-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.835-0400 m30999| 2015-07-09T14:16:25.820-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.836-0400 m31100| 2015-07-09T14:16:25.823-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.836-0400 m30999| 2015-07-09T14:16:25.824-0400 W SHARDING [conn440] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.839-0400 m31100| 2015-07-09T14:16:25.837-0400 I SHARDING [conn36] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.840-0400 m31100| 2015-07-09T14:16:25.838-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.843-0400 m31100| 2015-07-09T14:16:25.842-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.844-0400 m31100| 2015-07-09T14:16:25.843-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.847-0400 m31100| 2015-07-09T14:16:25.847-0400 I SHARDING [conn32] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.851-0400 m31100| 2015-07-09T14:16:25.848-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.853-0400 m31100| 2015-07-09T14:16:25.849-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.854-0400 m31100| 2015-07-09T14:16:25.850-0400 I SHARDING [conn39] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.854-0400 m31100| 2015-07-09T14:16:25.850-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.854-0400 m31100| 2015-07-09T14:16:25.851-0400 I SHARDING [conn35] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.854-0400 m31100| 2015-07-09T14:16:25.851-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.855-0400 m31100| 2015-07-09T14:16:25.852-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.855-0400 m31100| 2015-07-09T14:16:25.852-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.855-0400 m31100| 2015-07-09T14:16:25.853-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.855-0400 m31100| 2015-07-09T14:16:25.854-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.856-0400 m31100| 2015-07-09T14:16:25.854-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.857-0400 m30999| 2015-07-09T14:16:25.855-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.857-0400 m31100| 2015-07-09T14:16:25.856-0400 W SHARDING [conn36] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.860-0400 m30998| 2015-07-09T14:16:25.858-0400 W SHARDING [conn436] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.860-0400 m30999| 2015-07-09T14:16:25.856-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.861-0400 m30999| 2015-07-09T14:16:25.856-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.862-0400 m31100| 2015-07-09T14:16:25.861-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.870-0400 m31100| 2015-07-09T14:16:25.863-0400 W SHARDING [conn32] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.870-0400 m31100| 2015-07-09T14:16:25.864-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.870-0400 m31100| 2015-07-09T14:16:25.864-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.873-0400 m31100| 2015-07-09T14:16:25.865-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.873-0400 m31100| 2015-07-09T14:16:25.866-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.873-0400 m30998| 2015-07-09T14:16:25.867-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 10.0 }, { tid: 14.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.874-0400 m31100| 2015-07-09T14:16:25.869-0400 W SHARDING [conn35] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.874-0400 m31100| 2015-07-09T14:16:25.870-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.874-0400 m31100| 2015-07-09T14:16:25.870-0400 W SHARDING [conn39] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.874-0400 m30998| 2015-07-09T14:16:25.870-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.875-0400 m30999| 2015-07-09T14:16:25.871-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.875-0400 m30998| 2015-07-09T14:16:25.871-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.875-0400 m31100| 2015-07-09T14:16:25.872-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.876-0400 m30999| 2015-07-09T14:16:25.872-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.876-0400 m31100| 2015-07-09T14:16:25.876-0400 I SHARDING [conn132] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:25.876-0400-559eba79792e00bb67274a9e", server: "bs-osx108-8", clientAddr: "127.0.0.1:63181", time: new Date(1436465785876), what: "multi-split", ns: "db66.coll66", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 3, chunk: { min: { tid: 14.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559eba79ca4787b9985d1eaa') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.895-0400 m31100| 2015-07-09T14:16:25.895-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.899-0400 m31100| 2015-07-09T14:16:25.895-0400 I SHARDING [conn35] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.900-0400 m31100| 2015-07-09T14:16:25.896-0400 I SHARDING [conn39] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.902-0400 m31100| 2015-07-09T14:16:25.896-0400 I SHARDING [conn32] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.902-0400 m31100| 2015-07-09T14:16:25.898-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.902-0400 m31100| 2015-07-09T14:16:25.899-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.903-0400 m31100| 2015-07-09T14:16:25.899-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.903-0400 m31100| 2015-07-09T14:16:25.902-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.906-0400 m31100| 2015-07-09T14:16:25.906-0400 I SHARDING [conn36] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.909-0400 m31100| 2015-07-09T14:16:25.908-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
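Each "request split points lookup" line above is the shard primary computing candidate split keys for the chunk from the shard-key index, and because the workload keeps inserting between lookups, successive splitKeys vectors differ ({1, 10, 14, 17}, then {0, 9, 13, 16, 19}, then {0, 6, 11, 14, 16, 18}, ...). Something similar can be reproduced by hand with the splitVector command against the shard primary; a sketch, assuming direct shell access to the mongod on bs-osx108-8:31100, with an illustrative maxChunkSizeBytes (the test's effective threshold is far smaller):

```javascript
// Connect straight to the shard primary and ask for candidate split
// points over the whole chunk, as the autosplit path does internally.
var shard = new Mongo("bs-osx108-8:31100");
var res = shard.getDB("admin").runCommand({
    splitVector: "db66.coll66",
    keyPattern: {tid: 1.0},
    min: {tid: MinKey},
    max: {tid: MaxKey},
    maxChunkSizeBytes: 1024 * 1024  // illustrative value for this sketch
});
printjson(res.splitKeys);  // e.g. [ { tid: 0 }, { tid: 6 }, ... ]
```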
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.911-0400 m31100| 2015-07-09T14:16:25.910-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.911-0400 m31100| 2015-07-09T14:16:25.910-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.912-0400 m31100| 2015-07-09T14:16:25.911-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.913-0400 m30999| 2015-07-09T14:16:25.913-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.914-0400 m31100| 2015-07-09T14:16:25.913-0400 W SHARDING [conn32] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.917-0400 m31100| 2015-07-09T14:16:25.914-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.918-0400 m31100| 2015-07-09T14:16:25.914-0400 W SHARDING [conn35] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.918-0400 m31100| 2015-07-09T14:16:25.914-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.918-0400 m30998| 2015-07-09T14:16:25.915-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.919-0400 m30998| 2015-07-09T14:16:25.916-0400 W SHARDING [conn437] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.922-0400 m31100| 2015-07-09T14:16:25.916-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.925-0400 m31100| 2015-07-09T14:16:25.919-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.925-0400 m31100| 2015-07-09T14:16:25.919-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.927-0400 m31100| 2015-07-09T14:16:25.920-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.927-0400 m31100| 2015-07-09T14:16:25.921-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.928-0400 m31100| 2015-07-09T14:16:25.922-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.928-0400 m31100| 2015-07-09T14:16:25.922-0400 W SHARDING [conn39] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.930-0400 m31100| 2015-07-09T14:16:25.923-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.930-0400 m31100| 2015-07-09T14:16:25.925-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.930-0400 m30999| 2015-07-09T14:16:25.926-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.930-0400 m31100| 2015-07-09T14:16:25.926-0400 W SHARDING [conn36] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.932-0400 m30998| 2015-07-09T14:16:25.928-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.932-0400 m30999| 2015-07-09T14:16:25.928-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.932-0400 m30999| 2015-07-09T14:16:25.931-0400 W SHARDING [conn440] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.933-0400 m30999| 2015-07-09T14:16:25.931-0400 W SHARDING [conn433] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.933-0400 m30998| 2015-07-09T14:16:25.931-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.934-0400 m31100| 2015-07-09T14:16:25.934-0400 I SHARDING [conn132] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.937-0400 m31100| 2015-07-09T14:16:25.937-0400 I COMMAND [conn132] command db66.coll66 command: splitChunk { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 1.0 }, { tid: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 22924 } } } protocol:op_command 238ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.940-0400 m30998| 2015-07-09T14:16:25.938-0400 I SHARDING [conn433] ChunkManager: time to load chunks for db66.coll66: 0ms sequenceNumber: 83 version: 1|3||559eba79ca4787b9985d1eaa based on: 1|0||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.941-0400 m30998| 2015-07-09T14:16:25.939-0400 I SHARDING [conn433] autosplitted db66.coll66 shard: ns: db66.coll66, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 3 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.953-0400 m31100| 2015-07-09T14:16:25.951-0400 I SHARDING [conn132] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.955-0400 m31100| 2015-07-09T14:16:25.955-0400 I SHARDING [conn36] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.956-0400 m31100| 2015-07-09T14:16:25.956-0400 I SHARDING [conn39] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.963-0400 m31100| 2015-07-09T14:16:25.963-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.964-0400 m31100| 2015-07-09T14:16:25.963-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.965-0400 m31100| 2015-07-09T14:16:25.963-0400 I SHARDING [conn35] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.965-0400 m31100| 2015-07-09T14:16:25.964-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.965-0400 m31100| 2015-07-09T14:16:25.965-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.967-0400 m31100| 2015-07-09T14:16:25.966-0400 I SHARDING [conn32] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.972-0400 m31100| 2015-07-09T14:16:25.972-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.974-0400 m31100| 2015-07-09T14:16:25.972-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.976-0400 m31100| 2015-07-09T14:16:25.975-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.981-0400 m31100| 2015-07-09T14:16:25.975-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.982-0400 m30998| 2015-07-09T14:16:25.980-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.982-0400 m31100| 2015-07-09T14:16:25.977-0400 W SHARDING [conn39] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
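The winner of the race is visible above: conn132 completed its splitChunk in 238ms (of which roughly 23ms were spent waiting on the collection W lock), the collection metadata moved from version 1|0 to 1|3, and mongos m30998 reloaded the chunk map and logged the autosplit of [{ tid: MinKey }, { tid: MaxKey }) into 3 chunks against splitThreshold 921. The resulting layout can be inspected from the config database; a sketch, assuming a shell connected to one of the mongos processes:

```javascript
// After the multi-split, config.chunks should hold three chunks for
// db66.coll66, all with major version 1 and minor versions 1..3.
var chunks = db.getSiblingDB("config").chunks
                 .find({ns: "db66.coll66"})
                 .sort({min: 1})
                 .toArray();
chunks.forEach(function(c) {
    print(tojson(c.min) + " -->> " + tojson(c.max) +
          " lastmod: " + tojson(c.lastmod));
});
assert.eq(3, chunks.length);  // matches "autosplitted ... into 3" above
```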
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.984-0400 m31100| 2015-07-09T14:16:25.979-0400 I SHARDING [conn132] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba79792e00bb67274a9f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.984-0400 m31100| 2015-07-09T14:16:25.980-0400 I SHARDING [conn132] remotely refreshing metadata for db66.coll66 based on current shard version 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.984-0400 m31100| 2015-07-09T14:16:25.980-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.987-0400 m31100| 2015-07-09T14:16:25.980-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.987-0400 m31100| 2015-07-09T14:16:25.982-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.990-0400 m31100| 2015-07-09T14:16:25.982-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.991-0400 m31100| 2015-07-09T14:16:25.982-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.991-0400 m31100| 2015-07-09T14:16:25.983-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.991-0400 m31100| 2015-07-09T14:16:25.986-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.992-0400 m31100| 2015-07-09T14:16:25.986-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.992-0400 m31100| 2015-07-09T14:16:25.987-0400 W SHARDING [conn36] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.992-0400 m31100| 2015-07-09T14:16:25.988-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.993-0400 m31100| 2015-07-09T14:16:25.988-0400 W SHARDING [conn32] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.993-0400 m31100| 2015-07-09T14:16:25.988-0400 W SHARDING [conn35] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.994-0400 m30999| 2015-07-09T14:16:25.989-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.995-0400 m30999| 2015-07-09T14:16:25.989-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.996-0400 m30999| 2015-07-09T14:16:25.989-0400 W SHARDING [conn433] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.996-0400 m30998| 2015-07-09T14:16:25.989-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.996-0400 m31100| 2015-07-09T14:16:25.990-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.997-0400 m31100| 2015-07-09T14:16:25.993-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:25.997-0400 m30998| 2015-07-09T14:16:25.994-0400 W SHARDING [conn436] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.000-0400 m30998| 2015-07-09T14:16:25.999-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.001-0400 m30999| 2015-07-09T14:16:25.999-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 16.0 }, { tid: 17.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.002-0400 m30999| 2015-07-09T14:16:25.999-0400 W SHARDING [conn434] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.003-0400 m31100| 2015-07-09T14:16:25.996-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.003-0400 m31100| 2015-07-09T14:16:26.001-0400 I SHARDING [conn132] metadata of collection db66.coll66 already up to date (shard version : 1|3||559eba79ca4787b9985d1eaa, took 6ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.003-0400 m31100| 2015-07-09T14:16:26.001-0400 W SHARDING [conn132] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.018-0400 m31100| 2015-07-09T14:16:26.007-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.018-0400 m31100| 2015-07-09T14:16:26.008-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.018-0400 m31100| 2015-07-09T14:16:26.008-0400 I SHARDING [conn132] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked.
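After the split commits, the queued losers that still describe the old [{ : MinKey }, { : MaxKey }) range fail differently: the metadata refresh shows the shard is already at version 1|3, there is no longer any chunk with those bounds, and splitChunk now reports "the chunk boundaries may be stale" instead of lock contention. A client holding the old routing table would recover by re-reading it; a sketch, assuming a shell connected to a mongos (flushRouterConfig simply forces that reload):

```javascript
// Force this mongos to drop its cached chunk map and reload from the
// config servers, so stale [MinKey, MaxKey) bounds are replaced by the
// post-split chunks before any further split/targeting decisions.
assert.commandWorked(db.adminCommand({flushRouterConfig: 1}));
```

The mongos processes in the log do exactly this implicitly: the version mismatch triggers a ChunkManager reload, and subsequent autosplit attempts are issued against the three new chunks.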
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.019-0400 m30998| 2015-07-09T14:16:26.009-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 6.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.019-0400 m31100| 2015-07-09T14:16:26.009-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.020-0400 m31100| 2015-07-09T14:16:26.011-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.020-0400 m31100| 2015-07-09T14:16:26.012-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.021-0400 m31100| 2015-07-09T14:16:26.015-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.021-0400 m31100| 2015-07-09T14:16:26.015-0400 I SHARDING [conn34] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7a792e00bb67274aa0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.021-0400 m31100| 2015-07-09T14:16:26.015-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.021-0400 m30999| 2015-07-09T14:16:26.016-0400 W SHARDING [conn436] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.022-0400 m31100| 2015-07-09T14:16:26.016-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.022-0400 m31100| 2015-07-09T14:16:26.016-0400 I SHARDING [conn34] remotely refreshing metadata for db66.coll66 based on current shard version 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.026-0400 m31100| 2015-07-09T14:16:26.025-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.030-0400 m31100| 2015-07-09T14:16:26.029-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.031-0400 m31100| 2015-07-09T14:16:26.030-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.033-0400 m31100| 2015-07-09T14:16:26.032-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.034-0400 m31100| 2015-07-09T14:16:26.033-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.041-0400 m31100| 2015-07-09T14:16:26.034-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.042-0400 m30999| 2015-07-09T14:16:26.034-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.044-0400 m30999| 2015-07-09T14:16:26.034-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.044-0400 m30999| 2015-07-09T14:16:26.037-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.050-0400 m31100| 2015-07-09T14:16:26.042-0400 I SHARDING [conn34] metadata of collection db66.coll66 already up to date (shard version : 1|3||559eba79ca4787b9985d1eaa, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.050-0400 m31100| 2015-07-09T14:16:26.042-0400 W SHARDING [conn34] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.050-0400 m31100| 2015-07-09T14:16:26.047-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.050-0400 m31100| 2015-07-09T14:16:26.048-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.051-0400 m31100| 2015-07-09T14:16:26.049-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.053-0400 m31100| 2015-07-09T14:16:26.051-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.054-0400 m31100| 2015-07-09T14:16:26.053-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.056-0400 m31100| 2015-07-09T14:16:26.055-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.062-0400 m31100| 2015-07-09T14:16:26.057-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.062-0400 m31100| 2015-07-09T14:16:26.058-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.062-0400 m31100| 2015-07-09T14:16:26.059-0400 I SHARDING [conn34] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.062-0400 m30999| 2015-07-09T14:16:26.059-0400 W SHARDING [conn433] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 6.0 }, { tid: 10.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.063-0400 m31100| 2015-07-09T14:16:26.060-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.063-0400 m31100| 2015-07-09T14:16:26.060-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.063-0400 m30999| 2015-07-09T14:16:26.060-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.064-0400 m31100| 2015-07-09T14:16:26.060-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.064-0400 m30999| 2015-07-09T14:16:26.060-0400 W SHARDING [conn440] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.064-0400 m30999| 2015-07-09T14:16:26.061-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.074-0400 m31100| 2015-07-09T14:16:26.073-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.086-0400 m31100| 2015-07-09T14:16:26.076-0400 I SHARDING [conn37] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7a792e00bb67274aa1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.086-0400 m31100| 2015-07-09T14:16:26.076-0400 I SHARDING [conn37] remotely refreshing metadata for db66.coll66 based on current shard version 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.087-0400 m31100| 2015-07-09T14:16:26.077-0400 I SHARDING [conn38] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.087-0400 m31100| 2015-07-09T14:16:26.078-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.088-0400 m31100| 2015-07-09T14:16:26.079-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.090-0400 m30998| 2015-07-09T14:16:26.082-0400 I NETWORK [conn440] end connection 127.0.0.1:64002 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.090-0400 m30998| 2015-07-09T14:16:26.082-0400 I NETWORK [conn432] end connection 127.0.0.1:63985 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.091-0400 m30999| 2015-07-09T14:16:26.084-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.091-0400 m31100| 2015-07-09T14:16:26.084-0400 W SHARDING [conn38] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.092-0400 m30999| 2015-07-09T14:16:26.090-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.093-0400 m31100| 2015-07-09T14:16:26.090-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.100-0400 m31100| 2015-07-09T14:16:26.099-0400 I SHARDING [conn37] metadata of collection db66.coll66 already up to date (shard version : 1|3||559eba79ca4787b9985d1eaa, took 2ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.101-0400 m31100| 2015-07-09T14:16:26.099-0400 W SHARDING [conn37] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.101-0400 m31100| 2015-07-09T14:16:26.100-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.101-0400 m31100| 2015-07-09T14:16:26.100-0400 I SHARDING [conn37] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.102-0400 m30999| 2015-07-09T14:16:26.101-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 9.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.103-0400 m31100| 2015-07-09T14:16:26.102-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.111-0400 m30998| 2015-07-09T14:16:26.110-0400 I NETWORK [conn437] end connection 127.0.0.1:63997 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.113-0400 m31100| 2015-07-09T14:16:26.111-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.118-0400 m31100| 2015-07-09T14:16:26.117-0400 I SHARDING [conn34] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7a792e00bb67274aa2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.119-0400 m31100| 2015-07-09T14:16:26.118-0400 I SHARDING [conn34] remotely refreshing metadata for db66.coll66 based on current shard version 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.123-0400 m31100| 2015-07-09T14:16:26.121-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.124-0400 m31100| 2015-07-09T14:16:26.124-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.127-0400 m31100| 2015-07-09T14:16:26.126-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.132-0400 m30999| 2015-07-09T14:16:26.129-0400 W SHARDING [conn436] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.132-0400 m31100| 2015-07-09T14:16:26.127-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.139-0400 m30999| 2015-07-09T14:16:26.138-0400 I NETWORK [conn434] end connection 127.0.0.1:63989 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.139-0400 m31100| 2015-07-09T14:16:26.136-0400 I SHARDING [conn34] metadata of collection db66.coll66 already up to date (shard version : 1|3||559eba79ca4787b9985d1eaa, took 7ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.139-0400 m31100| 2015-07-09T14:16:26.136-0400 W SHARDING [conn34] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.140-0400 m31100| 2015-07-09T14:16:26.137-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.143-0400 m31100| 2015-07-09T14:16:26.141-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.143-0400 m31100| 2015-07-09T14:16:26.142-0400 I SHARDING [conn34] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.144-0400 m30999| 2015-07-09T14:16:26.143-0400 W SHARDING [conn437] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 }, { tid: 11.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.146-0400 m31100| 2015-07-09T14:16:26.146-0400 I SHARDING [conn37] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7a792e00bb67274aa3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.150-0400 m31100| 2015-07-09T14:16:26.149-0400 I SHARDING [conn37] remotely refreshing metadata for db66.coll66 based on current shard version 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.156-0400 m31100| 2015-07-09T14:16:26.152-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.156-0400 m31100| 2015-07-09T14:16:26.155-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.160-0400 m30999| 2015-07-09T14:16:26.158-0400 W SHARDING [conn438] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.160-0400 m31100| 2015-07-09T14:16:26.159-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.162-0400 m31100| 2015-07-09T14:16:26.161-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.162-0400 m30999| 2015-07-09T14:16:26.162-0400 I NETWORK [conn436] end connection 127.0.0.1:63992 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.171-0400 m30999| 2015-07-09T14:16:26.162-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.171-0400 m30998| 2015-07-09T14:16:26.164-0400 I NETWORK [conn439] end connection 127.0.0.1:64001 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.173-0400 m31100| 2015-07-09T14:16:26.173-0400 I SHARDING [conn37] metadata of collection db66.coll66 already up to date (shard version : 1|3||559eba79ca4787b9985d1eaa, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.175-0400 m30998| 2015-07-09T14:16:26.175-0400 I NETWORK [conn434] end connection 127.0.0.1:63990 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.177-0400 m31100| 2015-07-09T14:16:26.177-0400 W SHARDING [conn37] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.180-0400 m31100| 2015-07-09T14:16:26.179-0400 I SHARDING [conn37] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked. 
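The other failure mode interleaved above is staleness: by the time a queued splitChunk finally acquires the lock and refreshes ("metadata of collection db66.coll66 already up to date"), shard version 1|3 means the [{ : MinKey },{ : MaxKey }) range has already been carved into smaller chunks, so no single chunk matches the requested bounds and the command fails with "the chunk boundaries may be stale". The authoritative boundaries live in the config metadata; a sketch of inspecting them through a mongos, assuming the namespace from the log:

    // Read the current chunk layout for db66.coll66 from the config
    // metadata; every committed split rewrites these documents.
    db.getSiblingDB("config").chunks
      .find({ ns: "db66.coll66" })
      .sort({ min: 1 })
      .forEach(function(c) {
          printjson({ min: c.min, max: c.max, shard: c.shard, version: c.lastmod });
      });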
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.188-0400 m30999| 2015-07-09T14:16:26.183-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 7.0 }, { tid: 9.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 18.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.188-0400 m30998| 2015-07-09T14:16:26.186-0400 I NETWORK [conn441] end connection 127.0.0.1:64004 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.191-0400 m30999| 2015-07-09T14:16:26.186-0400 I NETWORK [conn440] end connection 127.0.0.1:63999 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.192-0400 m30999| 2015-07-09T14:16:26.187-0400 I NETWORK [conn438] end connection 127.0.0.1:63995 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.205-0400 m30999| 2015-07-09T14:16:26.198-0400 I NETWORK [conn433] end connection 127.0.0.1:63988 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.217-0400 m30999| 2015-07-09T14:16:26.217-0400 I NETWORK [conn437] end connection 127.0.0.1:63993 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.235-0400 m30998| 2015-07-09T14:16:26.231-0400 I NETWORK [conn435] end connection 127.0.0.1:63994 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.236-0400 m31100| 2015-07-09T14:16:26.235-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.243-0400 m31100| 2015-07-09T14:16:26.238-0400 I SHARDING [conn40] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.243-0400 m31100| 2015-07-09T14:16:26.241-0400 I SHARDING [conn15] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.243-0400 m31100| 2015-07-09T14:16:26.243-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.252-0400 m31100| 2015-07-09T14:16:26.251-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.253-0400 m31100| 2015-07-09T14:16:26.252-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: 
"test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.253-0400 m31100| 2015-07-09T14:16:26.252-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.254-0400 m31100| 2015-07-09T14:16:26.253-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.254-0400 m30998| 2015-07-09T14:16:26.253-0400 I NETWORK [conn438] end connection 127.0.0.1:64000 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.256-0400 m31100| 2015-07-09T14:16:26.255-0400 I SHARDING [conn15] could not acquire lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.257-0400 m31100| 2015-07-09T14:16:26.255-0400 I SHARDING [conn15] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.257-0400 m31100| 2015-07-09T14:16:26.255-0400 W SHARDING [conn15] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.257-0400 m30999| 2015-07-09T14:16:26.255-0400 W SHARDING [conn435] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.257-0400 m31100| 2015-07-09T14:16:26.256-0400 I SHARDING [conn40] could not acquire lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.258-0400 m31100| 2015-07-09T14:16:26.256-0400 I SHARDING [conn40] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.258-0400 m31100| 2015-07-09T14:16:26.256-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.258-0400 m30999| 2015-07-09T14:16:26.256-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.259-0400 m31100| 2015-07-09T14:16:26.259-0400 I SHARDING [conn37] could not acquire lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.259-0400 m31100| 2015-07-09T14:16:26.259-0400 I SHARDING [conn37] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.260-0400 m31100| 2015-07-09T14:16:26.259-0400 W SHARDING [conn37] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.261-0400 m31100| 2015-07-09T14:16:26.260-0400 I SHARDING [conn34] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7a792e00bb67274aa4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.261-0400 m31100| 2015-07-09T14:16:26.261-0400 I SHARDING [conn34] remotely refreshing metadata for db66.coll66 based on current shard version 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.265-0400 m30999| 2015-07-09T14:16:26.262-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.271-0400 m30998| 2015-07-09T14:16:26.266-0400 I NETWORK [conn436] end connection 127.0.0.1:63996 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.272-0400 m31100| 2015-07-09T14:16:26.272-0400 I SHARDING [conn34] metadata of collection db66.coll66 already up to date (shard version : 1|3||559eba79ca4787b9985d1eaa, took 7ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.273-0400 m31100| 2015-07-09T14:16:26.272-0400 W SHARDING [conn34] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.274-0400 m31100| 2015-07-09T14:16:26.273-0400 I SHARDING [conn34] distributed lock 
'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.287-0400 m30999| 2015-07-09T14:16:26.278-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 13.0 }, { tid: 15.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.287-0400 m30999| 2015-07-09T14:16:26.282-0400 I NETWORK [conn435] end connection 127.0.0.1:63991 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.304-0400 m31100| 2015-07-09T14:16:26.303-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.304-0400 m30998| 2015-07-09T14:16:26.304-0400 I NETWORK [conn433] end connection 127.0.0.1:63987 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.310-0400 m31100| 2015-07-09T14:16:26.304-0400 I SHARDING [conn37] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.311-0400 m31100| 2015-07-09T14:16:26.305-0400 I SHARDING [conn34] request split points lookup for chunk db66.coll66 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.311-0400 m31100| 2015-07-09T14:16:26.306-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.322-0400 m31100| 2015-07-09T14:16:26.307-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.322-0400 m31100| 2015-07-09T14:16:26.308-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.323-0400 m30999| 2015-07-09T14:16:26.309-0400 W SHARDING [conn439] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { 
tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.323-0400 m31100| 2015-07-09T14:16:26.309-0400 W SHARDING [conn34] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.323-0400 m31100| 2015-07-09T14:16:26.310-0400 I SHARDING [conn37] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7a792e00bb67274aa8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.323-0400 m31100| 2015-07-09T14:16:26.310-0400 I SHARDING [conn37] remotely refreshing metadata for db66.coll66 based on current shard version 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.324-0400 m31100| 2015-07-09T14:16:26.311-0400 W SHARDING [conn40] could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db66.coll66 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.324-0400 m30999| 2015-07-09T14:16:26.311-0400 W SHARDING [conn441] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db66.coll66 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.324-0400 m31100| 2015-07-09T14:16:26.316-0400 I SHARDING [conn37] metadata of collection db66.coll66 already up to date (shard version : 1|3||559eba79ca4787b9985d1eaa, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.324-0400 m31100| 2015-07-09T14:16:26.316-0400 W SHARDING [conn37] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.325-0400 m30999| 2015-07-09T14:16:26.316-0400 I NETWORK [conn441] end connection 127.0.0.1:64003 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.325-0400 m30999| 2015-07-09T14:16:26.321-0400 I NETWORK [conn439] end connection 127.0.0.1:63998 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.325-0400 m31100| 2015-07-09T14:16:26.322-0400 I SHARDING [conn37] distributed lock 'db66.coll66/bs-osx108-8:31100:1436464536:197041335' unlocked. 
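The "could not acquire lock ... (another update won)" / "distributed lock ... was not acquired" pairs above expose the lock protocol itself: each contending connection race-writes itself into the lock document for the collection, exactly one update wins (the "acquired, ts : 559eba7a792e00bb67274aa8" line), and the losers surface the code 125 error back through mongos. A hedged sketch of observing that state, assuming the 3.x config.locks document layout:

    // The lock fought over above is a document in config.locks keyed by
    // the namespace; a nonzero state means held or being acquired
    // (field names per the 3.x layout -- an assumption, not shown in the log).
    var lock = db.getSiblingDB("config").locks.findOne({ _id: "db66.coll66" });
    if (lock && lock.state > 0) {
        print("lock held by " + lock.who + ", ts " + lock.ts + ", why: " + lock.why);
    }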
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.325-0400 m30999| 2015-07-09T14:16:26.322-0400 W SHARDING [conn432] splitChunk failed - cmd: { splitChunk: "db66.coll66", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 9.0 }, { tid: 11.0 }, { tid: 12.0 }, { tid: 14.0 }, { tid: 16.0 }, { tid: 17.0 }, { tid: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba79ca4787b9985d1eaa') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.341-0400 m30999| 2015-07-09T14:16:26.341-0400 I NETWORK [conn432] end connection 127.0.0.1:63986 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.365-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.365-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.366-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.366-0400 jstests/concurrency/fsm_workloads/findAndModify_upsert.js: Workload completed in 994 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.366-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.366-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.366-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.367-0400 m30999| 2015-07-09T14:16:26.366-0400 I COMMAND [conn1] DROP: db66.coll66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.367-0400 m30999| 2015-07-09T14:16:26.366-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:26.366-0400-559eba7aca4787b9985d1eac", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465786366), what: "dropCollection.start", ns: "db66.coll66", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.425-0400 m30999| 2015-07-09T14:16:26.424-0400 I SHARDING [conn1] distributed lock 'db66.coll66/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7aca4787b9985d1ead
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.425-0400 m31100| 2015-07-09T14:16:26.425-0400 I COMMAND [conn37] CMD: drop db66.coll66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.439-0400 m31200| 2015-07-09T14:16:26.438-0400 I COMMAND [conn63] CMD: drop db66.coll66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.440-0400 m31102| 2015-07-09T14:16:26.440-0400 I COMMAND [repl writer worker 1] CMD: drop db66.coll66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.444-0400 m31101| 2015-07-09T14:16:26.442-0400 I COMMAND [repl writer worker 10] CMD: drop db66.coll66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.494-0400 m31100| 2015-07-09T14:16:26.493-0400 I SHARDING [conn37] remotely refreshing metadata for db66.coll66 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559eba79ca4787b9985d1eaa, current metadata version is 1|3||559eba79ca4787b9985d1eaa
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.496-0400 m31100| 2015-07-09T14:16:26.495-0400 W SHARDING [conn37] no chunks found when reloading db66.coll66, previous version was 0|0||559eba79ca4787b9985d1eaa, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.496-0400 m31100| 2015-07-09T14:16:26.495-0400 I SHARDING [conn37] dropping metadata for db66.coll66 at shard version 1|3||559eba79ca4787b9985d1eaa, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.498-0400 m30999| 2015-07-09T14:16:26.498-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:26.498-0400-559eba7aca4787b9985d1eae", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465786498), what: "dropCollection", ns: "db66.coll66", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.553-0400 m30999| 2015-07-09T14:16:26.552-0400 I SHARDING [conn1] distributed lock 'db66.coll66/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.609-0400 m30999| 2015-07-09T14:16:26.609-0400 I COMMAND [conn1] DROP DATABASE: db66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.610-0400 m30999| 2015-07-09T14:16:26.609-0400 I SHARDING [conn1] DBConfig::dropDatabase: db66
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.610-0400 m30999| 2015-07-09T14:16:26.609-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:26.609-0400-559eba7aca4787b9985d1eaf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465786609), what: "dropDatabase.start", ns: "db66", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.716-0400 m30999| 2015-07-09T14:16:26.715-0400 I SHARDING [conn1] DBConfig::dropDatabase: db66 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.717-0400 m31100| 2015-07-09T14:16:26.716-0400 I COMMAND [conn157] dropDatabase db66 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.717-0400 m31100| 2015-07-09T14:16:26.716-0400 I COMMAND [conn157] dropDatabase db66 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.717-0400 m30999| 2015-07-09T14:16:26.717-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:26.717-0400-559eba7aca4787b9985d1eb0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465786717), what: "dropDatabase", ns: "db66", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.718-0400 m31102| 2015-07-09T14:16:26.717-0400 I COMMAND [repl writer worker 11] dropDatabase db66 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.718-0400 m31102| 2015-07-09T14:16:26.717-0400 I COMMAND [repl writer worker 11] dropDatabase db66 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.719-0400 m31101| 2015-07-09T14:16:26.719-0400 I COMMAND [repl writer worker 2] dropDatabase db66 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.719-0400 m31101| 2015-07-09T14:16:26.719-0400 I COMMAND [repl writer worker 2] dropDatabase db66 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.810-0400 m31100| 2015-07-09T14:16:26.810-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.813-0400 m31101| 2015-07-09T14:16:26.812-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.821-0400 m31102| 2015-07-09T14:16:26.820-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.856-0400 m31200| 2015-07-09T14:16:26.855-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.857-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.857-0400
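With findAndModify_upsert.js finished, the harness tears down: it drops db66.coll66 through mongos (taking the collection lock, dropping on both shards, and writing the dropCollection.start / dropCollection changelog events), then drops db66 itself, and the secondaries m31101/m31102 replay both drops from the oplog. The equivalent shell steps, as a sketch against the namespaces in the log:

    // Teardown sequence mirrored from the log, run through a mongos.
    var testDB = db.getSiblingDB("db66");
    testDB.coll66.drop();   // "dropCollection.start" / "dropCollection" events
    testDB.dropDatabase();  // "dropDatabase.start" / "dropDatabase" events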
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.857-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.858-0400 jstests/concurrency/fsm_workloads/count_indexed.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.858-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.858-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.858-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.859-0400 m31201| 2015-07-09T14:16:26.859-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.859-0400 m31202| 2015-07-09T14:16:26.859-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.864-0400 m30999| 2015-07-09T14:16:26.864-0400 I SHARDING [conn1] distributed lock 'db67/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7aca4787b9985d1eb1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.868-0400 m30999| 2015-07-09T14:16:26.867-0400 I SHARDING [conn1] Placing [db67] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.868-0400 m30999| 2015-07-09T14:16:26.867-0400 I SHARDING [conn1] Enabling sharding for database [db67] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.922-0400 m30999| 2015-07-09T14:16:26.922-0400 I SHARDING [conn1] distributed lock 'db67/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.950-0400 m31100| 2015-07-09T14:16:26.950-0400 I INDEX [conn144] build index on: db67.coll67 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.coll67" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.951-0400 m31100| 2015-07-09T14:16:26.950-0400 I INDEX [conn144] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.953-0400 m31100| 2015-07-09T14:16:26.952-0400 I INDEX [conn144] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.954-0400 m30999| 2015-07-09T14:16:26.954-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db67.coll67", key: { tid: 1.0, i: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.959-0400 m30999| 2015-07-09T14:16:26.957-0400 I SHARDING [conn1] distributed lock 'db67.coll67/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7aca4787b9985d1eb2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.959-0400 m30999| 2015-07-09T14:16:26.958-0400 I SHARDING [conn1] enable sharding on: db67.coll67 with shard key: { tid: 1.0, i: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.960-0400 m30999| 2015-07-09T14:16:26.958-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:26.958-0400-559eba7aca4787b9985d1eb3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465786958), what: "shardCollection.start", ns: "db67.coll67", details: { shardKey: { tid: 1.0, i: 1.0 }, collection: "db67.coll67", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.972-0400 m31101| 2015-07-09T14:16:26.971-0400 I INDEX [repl writer worker 11] build index on: db67.coll67 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.coll67" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.972-0400 m31101| 2015-07-09T14:16:26.971-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.978-0400 m31102| 2015-07-09T14:16:26.978-0400 I INDEX [repl writer worker 7] build index on: db67.coll67 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.coll67" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.978-0400 m31102| 2015-07-09T14:16:26.978-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.980-0400 m31101| 2015-07-09T14:16:26.979-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:26.983-0400 m31102| 2015-07-09T14:16:26.983-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.013-0400 m30999| 2015-07-09T14:16:27.012-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db67.coll67 using new epoch 559eba7bca4787b9985d1eb4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.068-0400 m30999| 2015-07-09T14:16:27.067-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db67.coll67: 0ms sequenceNumber: 290 version: 1|0||559eba7bca4787b9985d1eb4 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.594-0400 m30999| 2015-07-09T14:16:27.123-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db67.coll67: 0ms sequenceNumber: 291 version: 1|0||559eba7bca4787b9985d1eb4 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.595-0400 m31100| 2015-07-09T14:16:27.125-0400 I SHARDING [conn175] remotely refreshing metadata for db67.coll67 with requested shard version 1|0||559eba7bca4787b9985d1eb4, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.596-0400 m31100| 2015-07-09T14:16:27.126-0400 I SHARDING [conn175] collection db67.coll67 was previously unsharded, new metadata loaded with shard version 1|0||559eba7bca4787b9985d1eb4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.597-0400 m31100| 2015-07-09T14:16:27.126-0400 I SHARDING [conn175] collection version was loaded at version 1|0||559eba7bca4787b9985d1eb4, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.597-0400 m30999| 2015-07-09T14:16:27.127-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:27.126-0400-559eba7bca4787b9985d1eb5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465787126), what: "shardCollection", ns: "db67.coll67", details: { version: "1|0||559eba7bca4787b9985d1eb4" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.597-0400 m30999| 2015-07-09T14:16:27.182-0400 I SHARDING [conn1] distributed lock 'db67.coll67/bs-osx108-8:30999:1436464534:16807' unlocked. 
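The setup for the next workload follows the standard pattern visible above: enable sharding on db67 (placed on test-rs0), build the { tid: 1.0, i: 1.0 } index that will back the shard key, then shard db67.coll67 on that compound key, which creates a single chunk at version 1|0. Roughly the same sequence in the shell, using the names from the log:

    // Setup steps mirrored from the log, run through a mongos.
    sh.enableSharding("db67");                            // "Enabling sharding for database [db67]"
    var coll = db.getSiblingDB("db67").coll67;
    coll.createIndex({ tid: 1, i: 1 });                   // index "tid_1_i_1"
    sh.shardCollection("db67.coll67", { tid: 1, i: 1 });  // one initial chunk, version 1|0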
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.597-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.597-0400 m30998| 2015-07-09T14:16:27.285-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64005 #442 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.598-0400 m30998| 2015-07-09T14:16:27.312-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64006 #443 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.598-0400 m30999| 2015-07-09T14:16:27.326-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64007 #442 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.599-0400 m30998| 2015-07-09T14:16:27.328-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64008 #444 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.599-0400 m30999| 2015-07-09T14:16:27.329-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64009 #443 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.599-0400 m30999| 2015-07-09T14:16:27.338-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64010 #444 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.600-0400 m30998| 2015-07-09T14:16:27.347-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64011 #445 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.600-0400 m30999| 2015-07-09T14:16:27.347-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64012 #445 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.600-0400 m30998| 2015-07-09T14:16:27.351-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64013 #446 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.600-0400 m30999| 2015-07-09T14:16:27.351-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64014 #446 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.600-0400 setting random seed: 4013694869354
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.600-0400 setting random seed: 8558604260906
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 setting random seed: 4461880037561
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 setting random seed: 8169700647704
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 setting random seed: 5205095321871
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 setting random seed: 5239127366803
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 setting random seed: 8158849901519
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 setting random seed: 817826208658
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 m30998| 2015-07-09T14:16:27.388-0400 I SHARDING [conn443] ChunkManager: time to load chunks for db67.coll67: 0ms sequenceNumber: 84 version: 1|0||559eba7bca4787b9985d1eb4 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.601-0400 setting random seed: 2037115427665
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.602-0400 setting random seed: 5790358693338
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.603-0400 m31100| 2015-07-09T14:16:27.522-0400 I WRITE [conn67] insert db67.count_fsm_3 query: { _id: ObjectId('559eba7beac5440bf8d3fb53'), i:
0.0, tid: 3.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 45738, W: 41336 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.603-0400 m31100| 2015-07-09T14:16:27.535-0400 I WRITE [conn30] insert db67.count_fsm_4 query: { _id: ObjectId('559eba7beac5440bf8d3fd14'), i: 0.0, tid: 4.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 43954, W: 56660 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 112ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.604-0400 m31100| 2015-07-09T14:16:27.553-0400 I WRITE [conn29] insert db67.count_fsm_0 query: { _id: ObjectId('559eba7beac5440bf8d4023f'), i: 0.0, tid: 0.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 26285, W: 69008 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 113ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.605-0400 m31100| 2015-07-09T14:16:27.573-0400 I WRITE [conn24] insert db67.count_fsm_9 query: { _id: ObjectId('559eba7beac5440bf8d403ad'), i: 0.0, tid: 9.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 23505, W: 86927 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 130ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.605-0400 m31100| 2015-07-09T14:16:27.588-0400 I WRITE [conn146] insert db67.count_fsm_7 query: { _id: ObjectId('559eba7beac5440bf8d401b2'), i: 0.0, tid: 7.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 22341, W: 107708 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 144ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.616-0400 m31100| 2015-07-09T14:16:27.616-0400 I WRITE [conn22] insert db67.count_fsm_2 query: { _id: ObjectId('559eba7beac5440bf8d3fd10'), i: 0.0, tid: 2.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 4, w: 4 } }, Database: { acquireCount: { w: 3, W: 1 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 33484, W: 121642 } }, Collection: { acquireCount: { w: 1, W: 1 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } 182ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.618-0400 m31100| 2015-07-09T14:16:27.617-0400 I WRITE [conn68] insert db67.count_fsm_6 query: { _id: ObjectId('559eba7beac5440bf8d3f882'), i: 1.0, tid: 6.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 6, w: 6 } }, Database: { 
acquireCount: { w: 5, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 150944, W: 14144 } }, Collection: { acquireCount: { w: 2, W: 1 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.618-0400 m31100| 2015-07-09T14:16:27.617-0400 I WRITE [conn31] insert db67.count_fsm_5 query: { _id: ObjectId('559eba7beac5440bf8d3f3d7'), i: 5.0, tid: 5.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 18, w: 18 } }, Database: { acquireCount: { w: 17, W: 1 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 175557 } }, Collection: { acquireCount: { w: 4, W: 1 } }, Metadata: { acquireCount: { w: 13 } }, oplog: { acquireCount: { w: 13 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.619-0400 m31100| 2015-07-09T14:16:27.618-0400 I WRITE [conn144] insert db67.count_fsm_8 query: { _id: ObjectId('559eba7beac5440bf8d3f70b'), i: 1.0, tid: 8.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 52, w: 52 } }, Database: { acquireCount: { w: 51, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 161422, W: 31176 } }, Collection: { acquireCount: { w: 3, W: 1 } }, Metadata: { acquireCount: { w: 48 } }, oplog: { acquireCount: { w: 48 } } } 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.619-0400 m31100| 2015-07-09T14:16:27.619-0400 I WRITE [conn147] insert db67.count_fsm_1 query: { _id: ObjectId('559eba7beac5440bf8d3f538'), i: 4.0, tid: 1.0 } ninserted:1 keyUpdates:0 writeConflicts:0 numYields:0 locks:{ Global: { acquireCount: { r: 10, w: 10 } }, Database: { acquireCount: { w: 9, W: 1 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 192623, W: 19978 } }, Collection: { acquireCount: { w: 3, W: 1 } }, Metadata: { acquireCount: { w: 6 } }, oplog: { acquireCount: { w: 6 } } } 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.923-0400 m31100| 2015-07-09T14:16:27.923-0400 I COMMAND [conn67] command db67.$cmd command: insert { insert: "count_fsm_3", documents: 330, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 349, w: 349 } }, Database: { acquireCount: { w: 348, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 139681, W: 41336 } }, Collection: { acquireCount: { w: 17, W: 1 } }, Metadata: { acquireCount: { w: 331 } }, oplog: { acquireCount: { w: 331 } } } protocol:op_command 503ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.931-0400 m31100| 2015-07-09T14:16:27.931-0400 I COMMAND [conn31] command db67.$cmd command: insert { insert: "count_fsm_5", documents: 360, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 381, w: 381 } }, Database: { acquireCount: { w: 380, W: 1 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 175557 } }, Collection: { acquireCount: { w: 19, W: 1 } }, Metadata: { acquireCount: { w: 361 } }, oplog: { acquireCount: { w: 361 } } } protocol:op_command 542ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.938-0400 m31100| 2015-07-09T14:16:27.938-0400 I INDEX [conn177] build index on: 
db67.count_fsm_3 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.938-0400 m31100| 2015-07-09T14:16:27.938-0400 I INDEX [conn177] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.946-0400 m31100| 2015-07-09T14:16:27.946-0400 I INDEX [conn177] build index done. scanned 330 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.960-0400 m31100| 2015-07-09T14:16:27.960-0400 I INDEX [conn185] build index on: db67.count_fsm_5 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.960-0400 m31100| 2015-07-09T14:16:27.960-0400 I INDEX [conn185] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.961-0400 m31102| 2015-07-09T14:16:27.960-0400 I INDEX [repl writer worker 0] build index on: db67.count_fsm_3 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.961-0400 m31102| 2015-07-09T14:16:27.960-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.969-0400 m31100| 2015-07-09T14:16:27.969-0400 I INDEX [conn185] build index done. scanned 360 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.969-0400 m31101| 2015-07-09T14:16:27.969-0400 I INDEX [repl writer worker 1] build index on: db67.count_fsm_3 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_3" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.969-0400 m31101| 2015-07-09T14:16:27.969-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.975-0400 m31102| 2015-07-09T14:16:27.975-0400 I INDEX [repl writer worker 0] build index done. scanned 330 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.979-0400 m31101| 2015-07-09T14:16:27.978-0400 I INDEX [repl writer worker 1] build index done. scanned 330 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.981-0400 m31100| 2015-07-09T14:16:27.980-0400 I COMMAND [conn144] command db67.$cmd command: insert { insert: "count_fsm_8", documents: 420, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 443, w: 443 } }, Database: { acquireCount: { w: 442, W: 1 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 192964, W: 31176 } }, Collection: { acquireCount: { w: 21, W: 1 } }, Metadata: { acquireCount: { w: 421 } }, oplog: { acquireCount: { w: 421 } } } protocol:op_command 586ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.997-0400 m31101| 2015-07-09T14:16:27.997-0400 I INDEX [repl writer worker 12] build index on: db67.count_fsm_5 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.997-0400 m31101| 2015-07-09T14:16:27.997-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.998-0400 m31102| 2015-07-09T14:16:27.998-0400 I INDEX [repl writer worker 13] build index on: db67.count_fsm_5 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_5" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:27.998-0400 m31102| 2015-07-09T14:16:27.998-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.004-0400 m31100| 2015-07-09T14:16:28.004-0400 I INDEX [conn175] build index on: db67.count_fsm_8 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.005-0400 m31100| 2015-07-09T14:16:28.004-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.006-0400 m31102| 2015-07-09T14:16:28.006-0400 I INDEX [repl writer worker 13] build index done. scanned 360 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.009-0400 m31101| 2015-07-09T14:16:28.009-0400 I INDEX [repl writer worker 12] build index done. scanned 360 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.012-0400 m31100| 2015-07-09T14:16:28.012-0400 I INDEX [conn175] build index done. scanned 420 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.045-0400 m30998| 2015-07-09T14:16:28.044-0400 I NETWORK [conn446] end connection 127.0.0.1:64013 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.047-0400 m30998| 2015-07-09T14:16:28.047-0400 I NETWORK [conn443] end connection 127.0.0.1:64006 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.055-0400 m30999| 2015-07-09T14:16:28.055-0400 I NETWORK [conn445] end connection 127.0.0.1:64012 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.070-0400 m31100| 2015-07-09T14:16:28.069-0400 I COMMAND [conn147] command db67.$cmd command: insert { insert: "count_fsm_1", documents: 425, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 452, w: 452 } }, Database: { acquireCount: { w: 451, W: 1 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 235609, W: 19978 } }, Collection: { acquireCount: { w: 25, W: 1 } }, Metadata: { acquireCount: { w: 426 } }, oplog: { acquireCount: { w: 426 } } } protocol:op_command 680ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.079-0400 m31102| 2015-07-09T14:16:28.079-0400 I INDEX [repl writer worker 13] build index on: db67.count_fsm_8 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.080-0400 m31102| 2015-07-09T14:16:28.079-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.088-0400 m31101| 2015-07-09T14:16:28.087-0400 I INDEX [repl writer worker 10] build index on: db67.count_fsm_8 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_8" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.088-0400 m31101| 2015-07-09T14:16:28.087-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.090-0400 m31102| 2015-07-09T14:16:28.089-0400 I INDEX [repl writer worker 13] build index done. scanned 420 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.094-0400 m31100| 2015-07-09T14:16:28.093-0400 I INDEX [conn185] build index on: db67.count_fsm_1 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.094-0400 m31100| 2015-07-09T14:16:28.093-0400 I INDEX [conn185] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.096-0400 m31101| 2015-07-09T14:16:28.096-0400 I INDEX [repl writer worker 10] build index done. scanned 420 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.100-0400 m31100| 2015-07-09T14:16:28.100-0400 I INDEX [conn185] build index done. scanned 425 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.123-0400 m31102| 2015-07-09T14:16:28.123-0400 I INDEX [repl writer worker 2] build index on: db67.count_fsm_1 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.123-0400 m31102| 2015-07-09T14:16:28.123-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.126-0400 m31100| 2015-07-09T14:16:28.124-0400 I COMMAND [conn30] command db67.$cmd command: insert { insert: "count_fsm_4", documents: 486, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 514, w: 514 } }, Database: { acquireCount: { w: 513, W: 1 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 194167, W: 56660 } }, Collection: { acquireCount: { w: 26, W: 1 } }, Metadata: { acquireCount: { w: 487 } }, oplog: { acquireCount: { w: 487 } } } protocol:op_command 702ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.129-0400 m31102| 2015-07-09T14:16:28.129-0400 I INDEX [repl writer worker 2] build index done. scanned 425 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.134-0400 m31101| 2015-07-09T14:16:28.134-0400 I INDEX [repl writer worker 9] build index on: db67.count_fsm_1 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_1" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.134-0400 m31101| 2015-07-09T14:16:28.134-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.142-0400 m31101| 2015-07-09T14:16:28.142-0400 I INDEX [repl writer worker 9] build index done. scanned 425 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.143-0400 m31100| 2015-07-09T14:16:28.143-0400 I INDEX [conn175] build index on: db67.count_fsm_4 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.143-0400 m31100| 2015-07-09T14:16:28.143-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.153-0400 m31100| 2015-07-09T14:16:28.152-0400 I INDEX [conn175] build index done. scanned 486 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.163-0400 m31102| 2015-07-09T14:16:28.162-0400 I INDEX [repl writer worker 13] build index on: db67.count_fsm_4 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.163-0400 m31102| 2015-07-09T14:16:28.162-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.164-0400 m30998| 2015-07-09T14:16:28.163-0400 I NETWORK [conn442] end connection 127.0.0.1:64005 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.175-0400 m31102| 2015-07-09T14:16:28.175-0400 I INDEX [repl writer worker 13] build index done. scanned 486 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.185-0400 m31101| 2015-07-09T14:16:28.185-0400 I INDEX [repl writer worker 1] build index on: db67.count_fsm_4 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_4" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.186-0400 m31101| 2015-07-09T14:16:28.185-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.192-0400 m31101| 2015-07-09T14:16:28.192-0400 I INDEX [repl writer worker 1] build index done. scanned 486 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.200-0400 m30999| 2015-07-09T14:16:28.199-0400 I NETWORK [conn446] end connection 127.0.0.1:64014 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.256-0400 m31100| 2015-07-09T14:16:28.254-0400 I COMMAND [conn29] command db67.$cmd command: insert { insert: "count_fsm_0", documents: 672, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 706, w: 706 } }, Database: { acquireCount: { w: 705, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 176930, W: 69008 } }, Collection: { acquireCount: { w: 32, W: 1 } }, Metadata: { acquireCount: { w: 673 } }, oplog: { acquireCount: { w: 673 } } } protocol:op_command 815ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.272-0400 m31100| 2015-07-09T14:16:28.272-0400 I COMMAND [conn24] command db67.$cmd command: insert { insert: "count_fsm_9", documents: 672, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 707, w: 707 } }, Database: { acquireCount: { w: 706, W: 1 }, acquireWaitCount: { w: 7, W: 1 }, timeAcquiringMicros: { w: 151035, W: 86927 } }, Collection: { acquireCount: { w: 33, W: 1 } }, Metadata: { acquireCount: { w: 673 } }, oplog: { acquireCount: { w: 673 } } } protocol:op_command 829ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.283-0400 m31100| 2015-07-09T14:16:28.282-0400 I INDEX [conn175] build index on: db67.count_fsm_0 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.283-0400 m31100| 2015-07-09T14:16:28.282-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.292-0400 m31100| 2015-07-09T14:16:28.292-0400 I INDEX [conn175] build index done. scanned 672 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.296-0400 m31101| 2015-07-09T14:16:28.296-0400 I INDEX [repl writer worker 2] build index on: db67.count_fsm_0 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.297-0400 m31101| 2015-07-09T14:16:28.296-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.304-0400 m31100| 2015-07-09T14:16:28.303-0400 I INDEX [conn185] build index on: db67.count_fsm_9 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.304-0400 m31100| 2015-07-09T14:16:28.303-0400 I INDEX [conn185] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.306-0400 m31102| 2015-07-09T14:16:28.304-0400 I INDEX [repl writer worker 13] build index on: db67.count_fsm_0 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_0" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.306-0400 m31102| 2015-07-09T14:16:28.304-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.313-0400 m31101| 2015-07-09T14:16:28.313-0400 I INDEX [repl writer worker 2] build index done. scanned 672 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.315-0400 m31100| 2015-07-09T14:16:28.314-0400 I INDEX [conn185] build index done. scanned 672 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.317-0400 m31102| 2015-07-09T14:16:28.317-0400 I INDEX [repl writer worker 13] build index done. scanned 672 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.325-0400 m31100| 2015-07-09T14:16:28.323-0400 I COMMAND [conn146] command db67.$cmd command: insert { insert: "count_fsm_7", documents: 700, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 737, w: 737 } }, Database: { acquireCount: { w: 736, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 175099, W: 107708 } }, Collection: { acquireCount: { w: 35, W: 1 } }, Metadata: { acquireCount: { w: 701 } }, oplog: { acquireCount: { w: 701 } } } protocol:op_command 879ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.325-0400 m31101| 2015-07-09T14:16:28.324-0400 I INDEX [repl writer worker 3] build index on: db67.count_fsm_9 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.329-0400 m31101| 2015-07-09T14:16:28.324-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.330-0400 m31102| 2015-07-09T14:16:28.325-0400 I INDEX [repl writer worker 9] build index on: db67.count_fsm_9 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_9" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.330-0400 m31102| 2015-07-09T14:16:28.325-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.337-0400 m31102| 2015-07-09T14:16:28.337-0400 I INDEX [repl writer worker 9] build index done. scanned 672 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.339-0400 m31101| 2015-07-09T14:16:28.338-0400 I INDEX [repl writer worker 3] build index done. scanned 672 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.340-0400 m31100| 2015-07-09T14:16:28.339-0400 I INDEX [conn177] build index on: db67.count_fsm_7 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.340-0400 m31100| 2015-07-09T14:16:28.339-0400 I INDEX [conn177] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.351-0400 m31100| 2015-07-09T14:16:28.350-0400 I INDEX [conn177] build index done. scanned 700 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.358-0400 m31100| 2015-07-09T14:16:28.355-0400 I COMMAND [conn22] command db67.$cmd command: insert { insert: "count_fsm_2", documents: 700, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 738, w: 738 } }, Database: { acquireCount: { w: 737, W: 1 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 180243, W: 121642 } }, Collection: { acquireCount: { w: 36, W: 1 } }, Metadata: { acquireCount: { w: 701 } }, oplog: { acquireCount: { w: 701 } } } protocol:op_command 922ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.359-0400 m31101| 2015-07-09T14:16:28.358-0400 I INDEX [repl writer worker 3] build index on: db67.count_fsm_7 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.359-0400 m31101| 2015-07-09T14:16:28.359-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.362-0400 m31102| 2015-07-09T14:16:28.362-0400 I INDEX [repl writer worker 9] build index on: db67.count_fsm_7 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_7" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.362-0400 m31102| 2015-07-09T14:16:28.362-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.369-0400 m31101| 2015-07-09T14:16:28.368-0400 I INDEX [repl writer worker 3] build index done. scanned 700 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.374-0400 m31102| 2015-07-09T14:16:28.373-0400 I INDEX [repl writer worker 9] build index done. scanned 700 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.378-0400 m31100| 2015-07-09T14:16:28.377-0400 I INDEX [conn179] build index on: db67.count_fsm_2 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.378-0400 m31100| 2015-07-09T14:16:28.377-0400 I INDEX [conn179] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.386-0400 m31100| 2015-07-09T14:16:28.385-0400 I INDEX [conn179] build index done. scanned 700 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.398-0400 m31101| 2015-07-09T14:16:28.398-0400 I INDEX [repl writer worker 5] build index on: db67.count_fsm_2 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.398-0400 m31101| 2015-07-09T14:16:28.398-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.401-0400 m31102| 2015-07-09T14:16:28.400-0400 I INDEX [repl writer worker 9] build index on: db67.count_fsm_2 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_2" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.401-0400 m31102| 2015-07-09T14:16:28.400-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.408-0400 m31100| 2015-07-09T14:16:28.405-0400 I COMMAND [conn68] command db67.$cmd command: insert { insert: "count_fsm_6", documents: 837, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 878, w: 878 } }, Database: { acquireCount: { w: 877, W: 1 }, acquireWaitCount: { w: 10, W: 1 }, timeAcquiringMicros: { w: 315426, W: 14144 } }, Collection: { acquireCount: { w: 39, W: 1 } }, Metadata: { acquireCount: { w: 838 } }, oplog: { acquireCount: { w: 838 } } } protocol:op_command 981ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.409-0400 m31101| 2015-07-09T14:16:28.409-0400 I INDEX [repl writer worker 5] build index done. scanned 700 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.414-0400 m31102| 2015-07-09T14:16:28.414-0400 I INDEX [repl writer worker 9] build index done. scanned 700 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.414-0400 m31100| 2015-07-09T14:16:28.414-0400 I INDEX [conn191] build index on: db67.count_fsm_6 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.414-0400 m31100| 2015-07-09T14:16:28.414-0400 I INDEX [conn191] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.422-0400 m31100| 2015-07-09T14:16:28.422-0400 I INDEX [conn191] build index done. scanned 837 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.434-0400 m31101| 2015-07-09T14:16:28.433-0400 I INDEX [repl writer worker 11] build index on: db67.count_fsm_6 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.435-0400 m31101| 2015-07-09T14:16:28.433-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.438-0400 m31102| 2015-07-09T14:16:28.436-0400 I INDEX [repl writer worker 10] build index on: db67.count_fsm_6 properties: { v: 1, key: { tid: 1.0, i: 1.0 }, name: "tid_1_i_1", ns: "db67.count_fsm_6" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.438-0400 m31102| 2015-07-09T14:16:28.436-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.445-0400 m31101| 2015-07-09T14:16:28.445-0400 I INDEX [repl writer worker 11] build index done. scanned 837 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.448-0400 m30998| 2015-07-09T14:16:28.448-0400 I NETWORK [conn444] end connection 127.0.0.1:64008 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.453-0400 m31102| 2015-07-09T14:16:28.451-0400 I INDEX [repl writer worker 10] build index done. scanned 837 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.454-0400 m30999| 2015-07-09T14:16:28.453-0400 I NETWORK [conn444] end connection 127.0.0.1:64010 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.460-0400 m30999| 2015-07-09T14:16:28.459-0400 I NETWORK [conn442] end connection 127.0.0.1:64007 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.460-0400 m30998| 2015-07-09T14:16:28.459-0400 I NETWORK [conn445] end connection 127.0.0.1:64011 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.487-0400 m30999| 2015-07-09T14:16:28.486-0400 I NETWORK [conn443] end connection 127.0.0.1:64009 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.507-0400 m30999| 2015-07-09T14:16:28.507-0400 I COMMAND [conn1] DROP: db67.count_fsm_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.507-0400 m30999| 2015-07-09T14:16:28.507-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.507-0400 m31100| 2015-07-09T14:16:28.507-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.514-0400 m31102| 2015-07-09T14:16:28.514-0400 I COMMAND [repl writer worker 8] CMD: drop db67.count_fsm_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.514-0400 m30999| 2015-07-09T14:16:28.514-0400 I COMMAND [conn1] DROP: db67.count_fsm_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.515-0400 m31100| 2015-07-09T14:16:28.514-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.515-0400 m31101| 2015-07-09T14:16:28.514-0400 I COMMAND [repl writer worker 9] CMD: drop db67.count_fsm_0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.515-0400 m30999| 2015-07-09T14:16:28.514-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.516-0400 m30999| 2015-07-09T14:16:28.516-0400 I COMMAND [conn1] DROP: db67.count_fsm_2 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:28.517-0400 m30999| 2015-07-09T14:16:28.516-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.517-0400 m31100| 2015-07-09T14:16:28.516-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.519-0400 m30999| 2015-07-09T14:16:28.519-0400 I COMMAND [conn1] DROP: db67.count_fsm_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.519-0400 m30999| 2015-07-09T14:16:28.519-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.520-0400 m31100| 2015-07-09T14:16:28.519-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.521-0400 m31102| 2015-07-09T14:16:28.521-0400 I COMMAND [repl writer worker 4] CMD: drop db67.count_fsm_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.522-0400 m31101| 2015-07-09T14:16:28.522-0400 I COMMAND [repl writer worker 10] CMD: drop db67.count_fsm_1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.522-0400 m31102| 2015-07-09T14:16:28.522-0400 I COMMAND [repl writer worker 12] CMD: drop db67.count_fsm_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.525-0400 m30999| 2015-07-09T14:16:28.524-0400 I COMMAND [conn1] DROP: db67.count_fsm_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.525-0400 m30999| 2015-07-09T14:16:28.524-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.525-0400 m31100| 2015-07-09T14:16:28.525-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.526-0400 m31102| 2015-07-09T14:16:28.526-0400 I COMMAND [repl writer worker 6] CMD: drop db67.count_fsm_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.527-0400 m31101| 2015-07-09T14:16:28.526-0400 I COMMAND [repl writer worker 5] CMD: drop db67.count_fsm_2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.527-0400 m30999| 2015-07-09T14:16:28.527-0400 I COMMAND [conn1] DROP: db67.count_fsm_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.527-0400 m30999| 2015-07-09T14:16:28.527-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.528-0400 m31100| 2015-07-09T14:16:28.527-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.529-0400 m31102| 2015-07-09T14:16:28.529-0400 I COMMAND [repl writer worker 13] CMD: drop db67.count_fsm_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.530-0400 m30999| 2015-07-09T14:16:28.529-0400 I COMMAND [conn1] DROP: db67.count_fsm_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.530-0400 m30999| 2015-07-09T14:16:28.529-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.530-0400 m31100| 2015-07-09T14:16:28.530-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.530-0400 m31101| 2015-07-09T14:16:28.530-0400 I COMMAND [repl writer worker 15] CMD: drop db67.count_fsm_3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.532-0400 m31102| 2015-07-09T14:16:28.531-0400 I COMMAND [repl writer worker 15] CMD: drop db67.count_fsm_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.532-0400 m30999| 2015-07-09T14:16:28.531-0400 I COMMAND [conn1] DROP: db67.count_fsm_7 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:28.532-0400 m30999| 2015-07-09T14:16:28.531-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.532-0400 m31100| 2015-07-09T14:16:28.532-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.532-0400 m31101| 2015-07-09T14:16:28.532-0400 I COMMAND [repl writer worker 12] CMD: drop db67.count_fsm_4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.534-0400 m31102| 2015-07-09T14:16:28.534-0400 I COMMAND [repl writer worker 14] CMD: drop db67.count_fsm_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.534-0400 m31101| 2015-07-09T14:16:28.534-0400 I COMMAND [repl writer worker 0] CMD: drop db67.count_fsm_5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.535-0400 m30999| 2015-07-09T14:16:28.534-0400 I COMMAND [conn1] DROP: db67.count_fsm_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.535-0400 m30999| 2015-07-09T14:16:28.534-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.535-0400 m31100| 2015-07-09T14:16:28.534-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.536-0400 m31101| 2015-07-09T14:16:28.535-0400 I COMMAND [repl writer worker 7] CMD: drop db67.count_fsm_6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.536-0400 m30999| 2015-07-09T14:16:28.536-0400 I COMMAND [conn1] DROP: db67.count_fsm_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.537-0400 m30999| 2015-07-09T14:16:28.536-0400 I COMMAND [conn1] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.537-0400 m31100| 2015-07-09T14:16:28.536-0400 I COMMAND [conn179] CMD: drop db67.count_fsm_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.537-0400 m31102| 2015-07-09T14:16:28.536-0400 I COMMAND [repl writer worker 11] CMD: drop db67.count_fsm_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.538-0400 m31101| 2015-07-09T14:16:28.538-0400 I COMMAND [repl writer worker 2] CMD: drop db67.count_fsm_7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.539-0400 m31102| 2015-07-09T14:16:28.539-0400 I COMMAND [repl writer worker 7] CMD: drop db67.count_fsm_8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.539-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.539-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.539-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.540-0400 jstests/concurrency/fsm_workloads/count_indexed.js: Workload completed in 1323 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.540-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.541-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.541-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.541-0400 m30999| 2015-07-09T14:16:28.539-0400 I COMMAND [conn1] DROP: db67.coll67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.541-0400 m30999| 2015-07-09T14:16:28.539-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:28.539-0400-559eba7cca4787b9985d1eb6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465788539), what: "dropCollection.start", ns: "db67.coll67", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.542-0400 m31101| 2015-07-09T14:16:28.540-0400 I COMMAND [repl writer worker 8] CMD: drop db67.count_fsm_8 
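The count_indexed.js entries above follow a fixed per-thread pattern: each worker tid bulk-inserts documents into its own db67.count_fsm_&lt;tid&gt; collection (the ordered: false inserts with large Metadata/oplog acquireCounts), the primary m31100 then builds a compound { tid: 1, i: 1 } index with the bulk method, and the secondaries m31101/m31102 replay the same build. A minimal shell sketch of that pattern, using the collection name and document count from the conn31 entries above; this approximates the workload, it is not its actual source:

    // Illustrative sketch of one count_indexed.js worker thread (tid 5),
    // matching the "insert: count_fsm_5, documents: 360, ordered: false"
    // and "build index ... tid_1_i_1" entries logged above.
    var db67 = db.getSiblingDB('db67');
    var tid = 5;
    var coll = db67.getCollection('count_fsm_' + tid);
    var bulk = coll.initializeUnorderedBulkOp();   // ordered: false, as logged
    for (var i = 0; i < 360; ++i) {
        bulk.insert({i: i, tid: tid});
    }
    assert.writeOK(bulk.execute());
    coll.createIndex({tid: 1, i: 1});              // produces the tid_1_i_1 builds above
    print(coll.find({tid: tid}).count());          // the count the workload then checks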
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.542-0400 m31102| 2015-07-09T14:16:28.541-0400 I COMMAND [repl writer worker 3] CMD: drop db67.count_fsm_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.542-0400 m31101| 2015-07-09T14:16:28.542-0400 I COMMAND [repl writer worker 4] CMD: drop db67.count_fsm_9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.606-0400 m30999| 2015-07-09T14:16:28.605-0400 I SHARDING [conn1] distributed lock 'db67.coll67/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7cca4787b9985d1eb7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.606-0400 m31100| 2015-07-09T14:16:28.606-0400 I COMMAND [conn37] CMD: drop db67.coll67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.610-0400 m31200| 2015-07-09T14:16:28.609-0400 I COMMAND [conn63] CMD: drop db67.coll67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.610-0400 m31102| 2015-07-09T14:16:28.610-0400 I COMMAND [repl writer worker 2] CMD: drop db67.coll67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.610-0400 m31101| 2015-07-09T14:16:28.610-0400 I COMMAND [repl writer worker 6] CMD: drop db67.coll67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.663-0400 m31100| 2015-07-09T14:16:28.662-0400 I SHARDING [conn37] remotely refreshing metadata for db67.coll67 with requested shard version 0|0||000000000000000000000000, current shard version is 1|0||559eba7bca4787b9985d1eb4, current metadata version is 1|0||559eba7bca4787b9985d1eb4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.665-0400 m31100| 2015-07-09T14:16:28.664-0400 W SHARDING [conn37] no chunks found when reloading db67.coll67, previous version was 0|0||559eba7bca4787b9985d1eb4, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.665-0400 m31100| 2015-07-09T14:16:28.664-0400 I SHARDING [conn37] dropping metadata for db67.coll67 at shard version 1|0||559eba7bca4787b9985d1eb4, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.667-0400 m30999| 2015-07-09T14:16:28.666-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:28.666-0400-559eba7cca4787b9985d1eb8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465788666), what: "dropCollection", ns: "db67.coll67", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.721-0400 m30999| 2015-07-09T14:16:28.721-0400 I SHARDING [conn1] distributed lock 'db67.coll67/bs-osx108-8:30999:1436464534:16807' unlocked. 
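The drop of db67.coll67 just above shows mongos coordinating a sharded-collection drop: it logs a dropCollection.start changelog event, takes the distributed lock on db67.coll67, issues the drop to both shard primaries (conn37 on m31100, conn63 on m31200), each primary purges its chunk metadata ("no chunks found ... this is a drop"), and mongos then logs dropCollection and releases the lock. A sketch of driving and observing the same sequence from the shell; the mongos address is an assumption taken from the m30999 port in this log:

    // Sketch: drop a sharded collection through mongos and read back the
    // changelog entries it wrote, as in the dropCollection events above.
    var mongos = new Mongo('localhost:30999');     // assumed from m30999 above
    assert.commandWorked(mongos.getDB('db67').runCommand({drop: 'coll67'}));
    mongos.getDB('config').changelog
          .find({ns: 'db67.coll67', what: /dropCollection/})
          .sort({time: -1}).limit(2)
          .forEach(printjson);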
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.777-0400 m30999| 2015-07-09T14:16:28.777-0400 I COMMAND [conn1] DROP DATABASE: db67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.777-0400 m30999| 2015-07-09T14:16:28.777-0400 I SHARDING [conn1] DBConfig::dropDatabase: db67 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.778-0400 m30999| 2015-07-09T14:16:28.777-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:28.777-0400-559eba7cca4787b9985d1eb9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465788777), what: "dropDatabase.start", ns: "db67", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.884-0400 m30999| 2015-07-09T14:16:28.883-0400 I SHARDING [conn1] DBConfig::dropDatabase: db67 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.884-0400 m31100| 2015-07-09T14:16:28.884-0400 I COMMAND [conn157] dropDatabase db67 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.884-0400 m31100| 2015-07-09T14:16:28.884-0400 I COMMAND [conn157] dropDatabase db67 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.885-0400 m30999| 2015-07-09T14:16:28.885-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:28.885-0400-559eba7cca4787b9985d1eba", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465788885), what: "dropDatabase", ns: "db67", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.885-0400 m31101| 2015-07-09T14:16:28.885-0400 I COMMAND [repl writer worker 1] dropDatabase db67 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.886-0400 m31101| 2015-07-09T14:16:28.885-0400 I COMMAND [repl writer worker 1] dropDatabase db67 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.886-0400 m31102| 2015-07-09T14:16:28.885-0400 I COMMAND [repl writer worker 1] dropDatabase db67 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.886-0400 m31102| 2015-07-09T14:16:28.885-0400 I COMMAND [repl writer worker 1] dropDatabase db67 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.975-0400 m31100| 2015-07-09T14:16:28.974-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.979-0400 m31102| 2015-07-09T14:16:28.978-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:28.979-0400 m31101| 2015-07-09T14:16:28.978-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.016-0400 m31200| 2015-07-09T14:16:29.016-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.018-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.019-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.019-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.019-0400 jstests/concurrency/fsm_workloads/map_reduce_drop.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.019-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.019-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.019-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.020-0400 m31201| 2015-07-09T14:16:29.019-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:29.020-0400 m31202| 2015-07-09T14:16:29.019-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.029-0400 m30999| 2015-07-09T14:16:29.029-0400 I SHARDING [conn1] distributed lock 'db68/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7dca4787b9985d1ebb [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.033-0400 m30999| 2015-07-09T14:16:29.032-0400 I SHARDING [conn1] Placing [db68] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.033-0400 m30999| 2015-07-09T14:16:29.032-0400 I SHARDING [conn1] Enabling sharding for database [db68] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.087-0400 m30999| 2015-07-09T14:16:29.087-0400 I SHARDING [conn1] distributed lock 'db68/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.111-0400 m31100| 2015-07-09T14:16:29.111-0400 I INDEX [conn68] build index on: db68.coll68 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db68.coll68" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.112-0400 m31100| 2015-07-09T14:16:29.111-0400 I INDEX [conn68] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.125-0400 m31100| 2015-07-09T14:16:29.124-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.126-0400 m30999| 2015-07-09T14:16:29.126-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db68.coll68", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.130-0400 m30999| 2015-07-09T14:16:29.129-0400 I SHARDING [conn1] distributed lock 'db68.coll68/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7dca4787b9985d1ebc [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.132-0400 m30999| 2015-07-09T14:16:29.131-0400 I SHARDING [conn1] enable sharding on: db68.coll68 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.133-0400 m30999| 2015-07-09T14:16:29.131-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.131-0400-559eba7dca4787b9985d1ebd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465789131), what: "shardCollection.start", ns: "db68.coll68", details: { shardKey: { _id: "hashed" }, collection: "db68.coll68", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.133-0400 m31102| 2015-07-09T14:16:29.133-0400 I INDEX [repl writer worker 8] build index on: db68.coll68 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db68.coll68" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.134-0400 m31102| 2015-07-09T14:16:29.133-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.141-0400 m31102| 2015-07-09T14:16:29.140-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.143-0400 m31101| 2015-07-09T14:16:29.142-0400 I INDEX [repl writer worker 9] build index on: db68.coll68 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db68.coll68" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.143-0400 m31101| 2015-07-09T14:16:29.142-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.148-0400 m31101| 2015-07-09T14:16:29.147-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.187-0400 m30999| 2015-07-09T14:16:29.187-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db68.coll68 using new epoch 559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.248-0400 m30999| 2015-07-09T14:16:29.248-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db68.coll68: 0ms sequenceNumber: 292 version: 1|1||559eba7dca4787b9985d1ebe based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.304-0400 m30999| 2015-07-09T14:16:29.303-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db68.coll68: 0ms sequenceNumber: 293 version: 1|1||559eba7dca4787b9985d1ebe based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.305-0400 m31100| 2015-07-09T14:16:29.305-0400 I SHARDING [conn179] remotely refreshing metadata for db68.coll68 with requested shard version 1|1||559eba7dca4787b9985d1ebe, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.307-0400 m31100| 2015-07-09T14:16:29.306-0400 I SHARDING [conn179] collection db68.coll68 was previously unsharded, new metadata loaded with shard version 1|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.307-0400 m31100| 2015-07-09T14:16:29.306-0400 I SHARDING [conn179] collection version was loaded at version 1|1||559eba7dca4787b9985d1ebe, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.308-0400 m30999| 2015-07-09T14:16:29.307-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.307-0400-559eba7dca4787b9985d1ebf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465789307), what: "shardCollection", ns: "db68.coll68", details: { version: "1|1||559eba7dca4787b9985d1ebe" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.361-0400 m30999| 2015-07-09T14:16:29.361-0400 I SHARDING [conn1] distributed lock 'db68.coll68/bs-osx108-8:30999:1436464534:16807' unlocked. 
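Everything from "Placing [db68] on: test-rs0" through the lock release above is the standard shardCollection bootstrap for a hashed key: mongos enables sharding on db68, the primary shard builds the _id_hashed index (replicated to m31101/m31102), and two initial chunks are created for db68.coll68 under epoch 559eba7dca4787b9985d1ebe. The shell equivalent of the two commands mongos logs, run against the same mongos; a sketch, not the test's own code:

    // Sketch of the db68 setup recorded above.
    var admin = db.getSiblingDB('admin');
    assert.commandWorked(admin.runCommand({enableSharding: 'db68'}));
    assert.commandWorked(admin.runCommand({
        shardCollection: 'db68.coll68',
        key: {_id: 'hashed'}    // builds _id_hashed; two initial chunks, split at 0
    }));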
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.362-0400 m30999| 2015-07-09T14:16:29.362-0400 I SHARDING [conn1] moving chunk ns: db68.coll68 moving ( ns: db68.coll68, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.363-0400 m31100| 2015-07-09T14:16:29.362-0400 I SHARDING [conn37] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.364-0400 m31100| 2015-07-09T14:16:29.363-0400 I SHARDING [conn37] received moveChunk request: { moveChunk: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba7dca4787b9985d1ebe') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.368-0400 m31100| 2015-07-09T14:16:29.367-0400 I SHARDING [conn37] distributed lock 'db68.coll68/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7d792e00bb67274aab [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.368-0400 m31100| 2015-07-09T14:16:29.367-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.367-0400-559eba7d792e00bb67274aac", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436465789367), what: "moveChunk.start", ns: "db68.coll68", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.420-0400 m31100| 2015-07-09T14:16:29.420-0400 I SHARDING [conn37] remotely refreshing metadata for db68.coll68 based on current shard version 1|1||559eba7dca4787b9985d1ebe, current metadata version is 1|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.422-0400 m31100| 2015-07-09T14:16:29.422-0400 I SHARDING [conn37] metadata of collection db68.coll68 already up to date (shard version : 1|1||559eba7dca4787b9985d1ebe, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.422-0400 m31100| 2015-07-09T14:16:29.422-0400 I SHARDING [conn37] moveChunk request accepted at version 1|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.423-0400 m31100| 2015-07-09T14:16:29.422-0400 I SHARDING [conn37] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.423-0400 m31200| 2015-07-09T14:16:29.423-0400 I SHARDING [conn16] remotely refreshing metadata for db68.coll68, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.425-0400 m31200| 2015-07-09T14:16:29.424-0400 I SHARDING [conn16] collection db68.coll68 was previously unsharded, new metadata loaded with shard version 0|0||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.425-0400 m31200| 2015-07-09T14:16:29.424-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba7dca4787b9985d1ebe, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.425-0400 m31200| 2015-07-09T14:16:29.424-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db68.coll68 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.427-0400 m31100| 2015-07-09T14:16:29.427-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.430-0400 m31100| 2015-07-09T14:16:29.430-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.436-0400 m31100| 2015-07-09T14:16:29.435-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.442-0400 m31200| 2015-07-09T14:16:29.442-0400 I INDEX [migrateThread] build index on: db68.coll68 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db68.coll68" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.442-0400 m31200| 2015-07-09T14:16:29.442-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.446-0400 m31100| 2015-07-09T14:16:29.445-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.447-0400 m31200| 2015-07-09T14:16:29.447-0400 I INDEX [migrateThread] build index on: db68.coll68 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db68.coll68" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.448-0400 m31200| 2015-07-09T14:16:29.447-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.459-0400 m31200| 2015-07-09T14:16:29.459-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.460-0400 m31200| 2015-07-09T14:16:29.460-0400 I SHARDING [migrateThread] Deleter starting delete for: db68.coll68 from { _id: 0 } -> { _id: MaxKey }, with opId: 95604 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.461-0400 m31200| 2015-07-09T14:16:29.460-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db68.coll68 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.463-0400 m31100| 2015-07-09T14:16:29.462-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.468-0400 m31202| 2015-07-09T14:16:29.468-0400 I INDEX [repl writer worker 9] build index on: db68.coll68 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db68.coll68" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.469-0400 m31202| 2015-07-09T14:16:29.468-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.471-0400 m31201| 2015-07-09T14:16:29.470-0400 I INDEX [repl writer worker 8] build index on: db68.coll68 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db68.coll68" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.471-0400 m31201| 2015-07-09T14:16:29.470-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.475-0400 m31202| 2015-07-09T14:16:29.475-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.478-0400 m31201| 2015-07-09T14:16:29.477-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.478-0400 m31200| 2015-07-09T14:16:29.478-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.478-0400 m31200| 2015-07-09T14:16:29.478-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db68.coll68' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.497-0400 m31100| 2015-07-09T14:16:29.496-0400 I SHARDING [conn37] moveChunk data transfer progress: { active: true, ns: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.497-0400 m31100| 2015-07-09T14:16:29.496-0400 I SHARDING [conn37] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.498-0400 m31100| 2015-07-09T14:16:29.497-0400 I SHARDING [conn37] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.498-0400 m31100| 2015-07-09T14:16:29.497-0400 I SHARDING [conn37] moveChunk setting version to: 2|0||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.502-0400 m31200| 2015-07-09T14:16:29.502-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db68.coll68' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.502-0400 m31200| 2015-07-09T14:16:29.502-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.502-0400-559eba7dd5a107a5b9c0db6a", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465789502), what: "moveChunk.to", ns: "db68.coll68", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 35, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 1, step 5 of 5: 24, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.556-0400 m31100| 2015-07-09T14:16:29.556-0400 I SHARDING [conn37] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.557-0400 m31100| 2015-07-09T14:16:29.556-0400 I SHARDING [conn37] moveChunk updating self version to: 2|1||559eba7dca4787b9985d1ebe through { _id: MinKey } -> { _id: 0 } for collection 'db68.coll68' [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.558-0400 m31100| 2015-07-09T14:16:29.558-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.558-0400-559eba7d792e00bb67274aad", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436465789558), what: "moveChunk.commit", ns: "db68.coll68", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.612-0400 m31100| 2015-07-09T14:16:29.611-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section 
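The moveChunk exchange above follows the donor/recipient migration protocol: the donor (m31100, conn37) repeatedly polls the recipient's status while it is in state "ready"; the recipient (m31200 migrateThread) builds the needed indexes, range-deletes, clones the zero documents, and flushes to its secondaries; once the status reaches "steady" the donor enters the critical section, sets the shard version to 2|0, and the recipient reports "done". The donor-side cleanup and the moveChunk.from summary follow just below. The whole migration comes from a single command against mongos, matching the request logged by conn37; a sketch, with _waitForDelete mirroring the "waiting for full cleanup after move" line:

    // Sketch of the command behind the migration logged above.
    var admin = db.getSiblingDB('admin');
    assert.commandWorked(admin.runCommand({
        moveChunk: 'db68.coll68',
        find: {_id: 0},            // any key inside { _id: 0 } -> { _id: MaxKey }
        to: 'test-rs1',
        _waitForDelete: true       // donor deletes the range before returning
    }));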
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.612-0400 m31100| 2015-07-09T14:16:29.611-0400 I SHARDING [conn37] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.612-0400 m31100| 2015-07-09T14:16:29.612-0400 I SHARDING [conn37] Deleter starting delete for: db68.coll68 from { _id: 0 } -> { _id: MaxKey }, with opId: 200353 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.612-0400 m31100| 2015-07-09T14:16:29.612-0400 I SHARDING [conn37] rangeDeleter deleted 0 documents for db68.coll68 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.612-0400 m31100| 2015-07-09T14:16:29.612-0400 I SHARDING [conn37] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.614-0400 m31100| 2015-07-09T14:16:29.613-0400 I SHARDING [conn37] distributed lock 'db68.coll68/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.614-0400 m31100| 2015-07-09T14:16:29.613-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.613-0400-559eba7d792e00bb67274aae", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436465789613), what: "moveChunk.from", ns: "db68.coll68", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 114, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.668-0400 m31100| 2015-07-09T14:16:29.666-0400 I COMMAND [conn37] command db68.coll68 command: moveChunk { moveChunk: "db68.coll68", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba7dca4787b9985d1ebe') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 303ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.669-0400 m30999| 2015-07-09T14:16:29.668-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db68.coll68: 0ms sequenceNumber: 294 version: 2|1||559eba7dca4787b9985d1ebe based on: 1|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.670-0400 m31100| 2015-07-09T14:16:29.670-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db68.coll68", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba7dca4787b9985d1ebe') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.674-0400 m31100| 2015-07-09T14:16:29.674-0400 I SHARDING [conn37] distributed lock 'db68.coll68/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba7d792e00bb67274aaf [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.675-0400 m31100| 2015-07-09T14:16:29.674-0400 I SHARDING [conn37] remotely refreshing metadata for db68.coll68 based on current shard version 
2|0||559eba7dca4787b9985d1ebe, current metadata version is 2|0||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.676-0400 m31100| 2015-07-09T14:16:29.676-0400 I SHARDING [conn37] updating metadata for db68.coll68 from shard version 2|0||559eba7dca4787b9985d1ebe to shard version 2|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.677-0400 m31100| 2015-07-09T14:16:29.676-0400 I SHARDING [conn37] collection version was loaded at version 2|1||559eba7dca4787b9985d1ebe, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.677-0400 m31100| 2015-07-09T14:16:29.676-0400 I SHARDING [conn37] splitChunk accepted at version 2|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.680-0400 m31100| 2015-07-09T14:16:29.679-0400 I SHARDING [conn37] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.679-0400-559eba7d792e00bb67274ab0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62639", time: new Date(1436465789679), what: "split", ns: "db68.coll68", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba7dca4787b9985d1ebe') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba7dca4787b9985d1ebe') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.734-0400 m31100| 2015-07-09T14:16:29.734-0400 I SHARDING [conn37] distributed lock 'db68.coll68/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.737-0400 m30999| 2015-07-09T14:16:29.736-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db68.coll68: 0ms sequenceNumber: 295 version: 2|3||559eba7dca4787b9985d1ebe based on: 2|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.738-0400 m31200| 2015-07-09T14:16:29.737-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db68.coll68", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba7dca4787b9985d1ebe') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.742-0400 m31200| 2015-07-09T14:16:29.741-0400 I SHARDING [conn63] distributed lock 'db68.coll68/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba7dd5a107a5b9c0db6b [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.742-0400 m31200| 2015-07-09T14:16:29.741-0400 I SHARDING [conn63] remotely refreshing metadata for db68.coll68 based on current shard version 0|0||559eba7dca4787b9985d1ebe, current metadata version is 1|1||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.744-0400 m31200| 2015-07-09T14:16:29.743-0400 I SHARDING [conn63] updating metadata for db68.coll68 from shard version 0|0||559eba7dca4787b9985d1ebe to shard version 2|0||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.744-0400 m31200| 2015-07-09T14:16:29.743-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559eba7dca4787b9985d1ebe, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.744-0400 m31200| 2015-07-09T14:16:29.743-0400 I SHARDING [conn63] splitChunk accepted at version 2|0||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:29.746-0400 m31200| 2015-07-09T14:16:29.745-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:29.745-0400-559eba7dd5a107a5b9c0db6c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436465789745), what: "split", ns: "db68.coll68", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559eba7dca4787b9985d1ebe') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba7dca4787b9985d1ebe') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.801-0400 m31200| 2015-07-09T14:16:29.800-0400 I SHARDING [conn63] distributed lock 'db68.coll68/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.803-0400 m30999| 2015-07-09T14:16:29.803-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db68.coll68: 0ms sequenceNumber: 296 version: 2|5||559eba7dca4787b9985d1ebe based on: 2|3||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.804-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.866-0400 m30998| 2015-07-09T14:16:29.863-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64016 #447 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.870-0400 m30998| 2015-07-09T14:16:29.869-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64017 #448 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.871-0400 m30999| 2015-07-09T14:16:29.871-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64018 #447 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.878-0400 m30999| 2015-07-09T14:16:29.878-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64019 #448 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.884-0400 m30999| 2015-07-09T14:16:29.883-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64020 #449 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.895-0400 setting random seed: 2027328973636 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.895-0400 setting random seed: 3318075765855 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.895-0400 setting random seed: 7799008712172 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.895-0400 setting random seed: 8949962947517 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.895-0400 setting random seed: 7939486517570 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.913-0400 m30999| 2015-07-09T14:16:29.912-0400 I SHARDING [conn449] distributed lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7dca4787b9985d1ec0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.916-0400 m30999| 2015-07-09T14:16:29.916-0400 I SHARDING [conn449] Placing [map_reduce_drop] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:29.970-0400 m30999| 2015-07-09T14:16:29.969-0400 I SHARDING [conn449] distributed lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' unlocked. 
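The split points logged above (-4611686018427387902, 0, 4611686018427387902, i.e. roughly ±2^62) are even quarters of the signed 64-bit hash space, consistent with db68.coll68 having been sharded on a hashed _id across the two shards, as the shardKeyPattern { _id: "hashed" } in the earlier entries indicates. A sketch of that setup, assuming the harness's usual enableSharding/shardCollection sequence:

    // Shard the collection on a hashed _id; with two shards this yields chunks
    // whose boundaries match the split events logged above.
    sh.enableSharding("db68");
    sh.shardCollection("db68.coll68", { _id: "hashed" });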
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.058-0400 m31100| 2015-07-09T14:16:30.057-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_315 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.164-0400 m31100| 2015-07-09T14:16:30.164-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.177-0400 m31100| 2015-07-09T14:16:30.176-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_315 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.177-0400 m31100| 2015-07-09T14:16:30.176-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_315 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.177-0400 m31100| 2015-07-09T14:16:30.177-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_315 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.188-0400 m31100| 2015-07-09T14:16:30.188-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.189-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.189-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.189-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.190-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 1274, w: 755, W: 3 } }, Database: { acquireCount: { r: 254, w: 747, R: 4, W: 11 } }, Collection: { acquireCount: { r: 254, w: 501 } }, Metadata: { acquireCount: { w: 249 } }, oplog: { acquireCount: { w: 249 } } } protocol:op_command 149ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.236-0400 m31100| 2015-07-09T14:16:30.236-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_316 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.415-0400 m30999| 2015-07-09T14:16:30.415-0400 I SHARDING [conn447] could not acquire lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.416-0400 m30999| 2015-07-09T14:16:30.415-0400 I SHARDING [conn447] distributed lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.416-0400 m30998| 2015-07-09T14:16:30.415-0400 I SHARDING [conn448] distributed lock 'map_reduce_drop/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba7e0bd550bed3408b30 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.417-0400 m30998| 2015-07-09T14:16:30.417-0400 I SHARDING [conn448] distributed lock 'map_reduce_drop/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.421-0400 m30998| 2015-07-09T14:16:30.420-0400 I SHARDING [conn447] distributed lock 'map_reduce_drop/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba7e0bd550bed3408b31 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.421-0400 m30998| 2015-07-09T14:16:30.421-0400 I SHARDING [conn447] distributed lock 'map_reduce_drop/bs-osx108-8:30998:1436464535:16807' unlocked. 
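The mapReduce invocation being logged over and over here has its reducer and finalize bodies truncated by the log formatter ("This dummy reducer is present to e...", "return redu..."). A sketch reconstructing the call via runCommand to match the logged command shape; only the mapper body is fully visible, so the reducer and finalize bodies below are illustrative guesses, not the workload's actual code:

    // Reconstruction of the logged { mapreduce: "coll68", ..., out: "coll68_out" }.
    var res = db.getSiblingDB("map_reduce_drop").runCommand({
        mapreduce: "coll68",
        map: function mapper() { emit(this.key, 1); },
        reduce: function reducer(key, values) {
            // body truncated in the log: "This dummy reducer is present to e..."
            return values.length;   // illustrative only
        },
        finalize: function finalize(key, reducedValue) {
            return reducedValue;    // guessed from the truncated "return redu..."
        },
        out: "coll68_out"
    });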
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.426-0400 m30999| 2015-07-09T14:16:30.425-0400 I SHARDING [conn448] distributed lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7eca4787b9985d1ec2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.427-0400 m30999| 2015-07-09T14:16:30.426-0400 I SHARDING [conn448] distributed lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.471-0400 m31100| 2015-07-09T14:16:30.471-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.477-0400 m31100| 2015-07-09T14:16:30.476-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_316 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.477-0400 m31100| 2015-07-09T14:16:30.477-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_316 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.497-0400 m31102| 2015-07-09T14:16:30.497-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.500-0400 m31101| 2015-07-09T14:16:30.500-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.507-0400 m31100| 2015-07-09T14:16:30.507-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_316 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.521-0400 m31100| 2015-07-09T14:16:30.521-0400 I COMMAND [conn146] command map_reduce_drop.$cmd command: insert { insert: "coll68", documents: 250, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 256, w: 256 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 6139 } }, Database: { acquireCount: { w: 256 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 30122 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.529-0400 m31100| 2015-07-09T14:16:30.529-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.530-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.530-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.530-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.531-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 2479, w: 1476, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 15044 } }, Database: { acquireCount: { r: 493, w: 1468, R: 7, W: 10 }, acquireWaitCount: { W: 4 }, timeAcquiringMicros: { W: 50867 } }, Collection: { acquireCount: { r: 493, w: 980 } }, Metadata: { acquireCount: { w: 490 } }, oplog: { acquireCount: { w: 490 } } } protocol:op_command 294ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.531-0400 m30999| 2015-07-09T14:16:30.530-0400 I COMMAND 
[conn449] DROP: map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.531-0400 m30999| 2015-07-09T14:16:30.530-0400 I COMMAND [conn449] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.532-0400 m31100| 2015-07-09T14:16:30.530-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.538-0400 m31100| 2015-07-09T14:16:30.537-0400 I COMMAND [conn24] command map_reduce_drop.$cmd command: insert { insert: "coll68", documents: 250, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 256, w: 256 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 13207 } }, Database: { acquireCount: { w: 256 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 30450 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 115ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.538-0400 m31100| 2015-07-09T14:16:30.538-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_317 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.544-0400 m31102| 2015-07-09T14:16:30.544-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.549-0400 m31101| 2015-07-09T14:16:30.549-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.552-0400 m31100| 2015-07-09T14:16:30.552-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_319 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.564-0400 m31100| 2015-07-09T14:16:30.563-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_318 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.564-0400 m31100| 2015-07-09T14:16:30.563-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_317 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.591-0400 m31101| 2015-07-09T14:16:30.591-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.tmp.mr.coll68_317 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.596-0400 m31102| 2015-07-09T14:16:30.596-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.tmp.mr.coll68_317 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.596-0400 m31100| 2015-07-09T14:16:30.596-0400 I COMMAND [conn191] mr failed, removing collection :: caused by :: 18697 Collection unexpectedly disappeared: map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.596-0400 m31100| 2015-07-09T14:16:30.596-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_317 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.638-0400 m31100| 2015-07-09T14:16:30.637-0400 I COMMAND [conn191] command map_reduce_drop.coll68 command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.638-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.638-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.638-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 
2015-07-09T14:16:30.639-0400 m31100| return redu..., out: "coll68_out" } planSummary: EOF ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 exception: Collection unexpectedly disappeared: map_reduce_drop.coll68 code:18697 numYields:0 reslen:104 locks:{ Global: { acquireCount: { r: 19, w: 11 } }, Database: { acquireCount: { r: 2, w: 5, R: 2, W: 8 }, acquireWaitCount: { r: 1, w: 2, R: 2, W: 6 }, timeAcquiringMicros: { r: 18956, w: 194, R: 8670, W: 75108 } }, Collection: { acquireCount: { r: 2, w: 5 } }, Metadata: { acquireCount: { w: 2 } }, oplog: { acquireCount: { w: 2 } } } protocol:op_command 127ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.672-0400 m31100| 2015-07-09T14:16:30.671-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_320 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.707-0400 m31100| 2015-07-09T14:16:30.706-0400 I COMMAND [conn24] command map_reduce_drop.$cmd command: insert { insert: "coll68", documents: 250, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 257, w: 257 } }, Database: { acquireCount: { w: 257 }, acquireWaitCount: { w: 7 }, timeAcquiringMicros: { w: 79478 } }, Collection: { acquireCount: { w: 7 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 142ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.760-0400 m31100| 2015-07-09T14:16:30.759-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_321 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.766-0400 m31100| 2015-07-09T14:16:30.765-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_322 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.918-0400 m30999| 2015-07-09T14:16:30.918-0400 I SHARDING [conn447] distributed lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba7eca4787b9985d1ec3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:30.919-0400 m30999| 2015-07-09T14:16:30.919-0400 I SHARDING [conn447] distributed lock 'map_reduce_drop/bs-osx108-8:30999:1436464534:16807' unlocked. 
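The code:18697 exception above ("Collection unexpectedly disappeared: map_reduce_drop.coll68") is the expected loser's outcome when another thread drops the input collection while a mapReduce over it is running; note the server also cleans up its tmp.mr collection ("mr failed, removing collection"). A caller deliberately racing drops against mapReduce, as this workload does, can treat exactly that code as benign. A sketch, assuming mapper/reducer/finalize are bound to the functions from the reconstruction above:

    // Tolerate only the drop-vs-mapReduce race; surface any other failure.
    var res = db.getSiblingDB("map_reduce_drop").runCommand({
        mapreduce: "coll68", map: mapper, reduce: reducer,
        finalize: finalize, out: "coll68_out"
    });
    if (res.ok !== 1 && res.code !== 18697) {   // 18697 taken from the log above
        throw new Error("unexpected mapReduce failure: " + tojson(res));
    }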
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:31.003-0400 m31100| 2015-07-09T14:16:31.002-0400 I COMMAND [conn175] CMD: drop map_reduce_drop.tmp.mr.coll68_323 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.334-0400 m31100| 2015-07-09T14:16:33.333-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.340-0400 m31100| 2015-07-09T14:16:33.340-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_318 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.340-0400 m31100| 2015-07-09T14:16:33.340-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_318 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.342-0400 m31101| 2015-07-09T14:16:33.341-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.342-0400 m31102| 2015-07-09T14:16:33.342-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.343-0400 m31100| 2015-07-09T14:16:33.342-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_318 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.344-0400 m31100| 2015-07-09T14:16:33.343-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.351-0400 m31100| 2015-07-09T14:16:33.350-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_320 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.351-0400 m31100| 2015-07-09T14:16:33.351-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_320 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.352-0400 m31100| 2015-07-09T14:16:33.352-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_320 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.353-0400 m31100| 2015-07-09T14:16:33.353-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.353-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.353-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.353-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.354-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3680, w: 2193, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 7851 } }, Database: { acquireCount: { r: 733, w: 2185, R: 9, W: 10 }, acquireWaitCount: { r: 2, w: 34, R: 9, W: 7 }, timeAcquiringMicros: { r: 8865, w: 175087, R: 85123, W: 68844 } }, Collection: { acquireCount: { r: 733, w: 1458 } }, Metadata: { acquireCount: { w: 729 } }, oplog: { acquireCount: { w: 729 } } } protocol:op_command 2829ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.360-0400 m31102| 2015-07-09T14:16:33.360-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.363-0400 m31101| 2015-07-09T14:16:33.363-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.369-0400 m31100| 2015-07-09T14:16:33.368-0400 I 
COMMAND [conn191] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.369-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.369-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.369-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.370-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3678, w: 2193, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 5843, W: 971 } }, Database: { acquireCount: { r: 732, w: 2185, R: 9, W: 10 }, acquireWaitCount: { r: 2, w: 28, R: 7, W: 6 }, timeAcquiringMicros: { r: 25017, w: 119244, R: 37730, W: 56925 } }, Collection: { acquireCount: { r: 732, w: 1458 } }, Metadata: { acquireCount: { w: 729 } }, oplog: { acquireCount: { w: 729 } } } protocol:op_command 2726ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.388-0400 m31100| 2015-07-09T14:16:33.388-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.395-0400 m31100| 2015-07-09T14:16:33.395-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_322 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.395-0400 m31100| 2015-07-09T14:16:33.395-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_322 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.407-0400 m31102| 2015-07-09T14:16:33.406-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.407-0400 m31101| 2015-07-09T14:16:33.407-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.432-0400 m31100| 2015-07-09T14:16:33.432-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_322 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.444-0400 m31100| 2015-07-09T14:16:33.443-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_324 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.464-0400 m31100| 2015-07-09T14:16:33.461-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.465-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.465-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.465-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.466-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3680, w: 2193, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7762, w: 6141, W: 9526 } }, Database: { acquireCount: { r: 733, w: 2185, R: 9, W: 10 }, acquireWaitCount: { r: 4, w: 22, R: 9, W: 8 }, timeAcquiringMicros: { r: 13970, w: 84003, R: 10244, W: 75317 } }, Collection: { acquireCount: { r: 733, w: 1458 } }, Metadata: { acquireCount: { w: 729 
} }, oplog: { acquireCount: { w: 729 } } } protocol:op_command 2731ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.499-0400 m31100| 2015-07-09T14:16:33.499-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_325 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.517-0400 m31100| 2015-07-09T14:16:33.516-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.528-0400 m31100| 2015-07-09T14:16:33.526-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_321 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.528-0400 m31100| 2015-07-09T14:16:33.527-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_321 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.532-0400 m31100| 2015-07-09T14:16:33.532-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_321 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.537-0400 m31100| 2015-07-09T14:16:33.537-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_326 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.539-0400 m31101| 2015-07-09T14:16:33.539-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.545-0400 m31102| 2015-07-09T14:16:33.545-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.554-0400 m31100| 2015-07-09T14:16:33.553-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.554-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.555-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.555-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.556-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3680, w: 2193, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 23780, w: 5519, W: 17466 } }, Database: { acquireCount: { r: 733, w: 2185, R: 9, W: 10 }, acquireWaitCount: { r: 9, w: 28, R: 9, W: 8 }, timeAcquiringMicros: { r: 75634, w: 114418, R: 39166, W: 38766 } }, Collection: { acquireCount: { r: 733, w: 1458 } }, Metadata: { acquireCount: { w: 729 } }, oplog: { acquireCount: { w: 729 } } } protocol:op_command 2845ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.667-0400 m31100| 2015-07-09T14:16:33.666-0400 I COMMAND [conn24] command map_reduce_drop.$cmd command: insert { insert: "coll68", documents: 250, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 255, w: 255 } }, Database: { acquireCount: { w: 255 }, acquireWaitCount: { w: 5 }, timeAcquiringMicros: { w: 67098 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 103ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:33.693-0400 m31100| 2015-07-09T14:16:33.693-0400 I COMMAND [conn185] CMD: 
drop map_reduce_drop.tmp.mr.coll68_327 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:34.183-0400 m31100| 2015-07-09T14:16:34.183-0400 I - [conn175] M/R: (3/3) Final Reduce Progress: 700/959 72% [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:34.582-0400 m30999| 2015-07-09T14:16:34.581-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:16:34.580-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.154-0400 m31100| 2015-07-09T14:16:35.154-0400 I COMMAND [conn175] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.161-0400 m31100| 2015-07-09T14:16:35.160-0400 I COMMAND [conn175] CMD: drop map_reduce_drop.tmp.mr.coll68_323 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.161-0400 m31100| 2015-07-09T14:16:35.161-0400 I COMMAND [conn175] CMD: drop map_reduce_drop.tmp.mr.coll68_323 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.162-0400 m31100| 2015-07-09T14:16:35.162-0400 I COMMAND [conn175] CMD: drop map_reduce_drop.tmp.mr.coll68_323 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.162-0400 m31101| 2015-07-09T14:16:35.162-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.163-0400 m31102| 2015-07-09T14:16:35.163-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.163-0400 m31100| 2015-07-09T14:16:35.163-0400 I COMMAND [conn175] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.164-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.164-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.164-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.165-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 4851, w: 2892, W: 3 }, acquireWaitCount: { r: 3, w: 3, W: 1 }, timeAcquiringMicros: { r: 34098, w: 25278, W: 117 } }, Database: { acquireCount: { r: 966, w: 2884, R: 12, W: 10 }, acquireWaitCount: { r: 15, w: 94, R: 12, W: 8 }, timeAcquiringMicros: { r: 138046, w: 1288471, R: 3557, W: 2301 } }, Collection: { acquireCount: { r: 966, w: 1924 } }, Metadata: { acquireCount: { w: 962 } }, oplog: { acquireCount: { w: 962 } } } protocol:op_command 4179ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.245-0400 m31100| 2015-07-09T14:16:35.245-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_328 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:35.994-0400 m30998| 2015-07-09T14:16:35.994-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:16:35.992-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:36.901-0400 m31100| 2015-07-09T14:16:36.901-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 
2015-07-09T14:16:36.899-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:37.315-0400 m31200| 2015-07-09T14:16:37.314-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:16:37.313-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:39.082-0400 m31100| 2015-07-09T14:16:39.082-0400 I - [conn177] M/R: (3/3) Final Reduce Progress: 1100/1816 60% [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:39.093-0400 m31100| 2015-07-09T14:16:39.093-0400 I - [conn184] M/R: (3/3) Final Reduce Progress: 900/2013 44% [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:39.175-0400 m31100| 2015-07-09T14:16:39.175-0400 I - [conn179] M/R: (3/3) Final Reduce Progress: 1100/1816 60% [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:39.213-0400 m31100| 2015-07-09T14:16:39.213-0400 I - [conn191] M/R: (3/3) Final Reduce Progress: 1100/1816 60% [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:39.235-0400 m31100| 2015-07-09T14:16:39.235-0400 I - [conn185] M/R: (3/3) Final Reduce Progress: 1100/1816 60% [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.190-0400 m31100| 2015-07-09T14:16:41.190-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.200-0400 m31100| 2015-07-09T14:16:41.200-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_324 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.200-0400 m31100| 2015-07-09T14:16:41.200-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_324 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.202-0400 m31101| 2015-07-09T14:16:41.201-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.203-0400 m31102| 2015-07-09T14:16:41.202-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.204-0400 m31100| 2015-07-09T14:16:41.204-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_324 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.206-0400 m31100| 2015-07-09T14:16:41.206-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.206-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.206-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.207-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.207-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:34 reslen:179 locks:{ Global: { acquireCount: { r: 9222, w: 5463, W: 3 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 17339, w: 6654 } }, Database: { acquireCount: { r: 1822, w: 5455, R: 56, W: 10 }, acquireWaitCount: { r: 1, w: 72, R: 54, W: 8 }, timeAcquiringMicros: { r: 1259, w: 643177, R: 176351, W: 32450 } }, Collection: { acquireCount: { r: 1822, w: 3638 } }, Metadata: { acquireCount: { w: 1819 } }, oplog: { 
acquireCount: { w: 1819 } } } protocol:op_command 7774ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.255-0400 m31100| 2015-07-09T14:16:41.254-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.262-0400 m31100| 2015-07-09T14:16:41.262-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_325 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.263-0400 m31100| 2015-07-09T14:16:41.262-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_325 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.270-0400 m31100| 2015-07-09T14:16:41.270-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_325 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.271-0400 m31101| 2015-07-09T14:16:41.270-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.271-0400 m31102| 2015-07-09T14:16:41.271-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.275-0400 m31100| 2015-07-09T14:16:41.275-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.275-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.275-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.275-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.276-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:57 reslen:179 locks:{ Global: { acquireCount: { r: 9268, w: 5463, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 41365, W: 4443 } }, Database: { acquireCount: { r: 1822, w: 5455, R: 79, W: 10 }, acquireWaitCount: { r: 5, w: 45, R: 78, W: 8 }, timeAcquiringMicros: { r: 13500, w: 115355, R: 178877, W: 19863 } }, Collection: { acquireCount: { r: 1822, w: 3638 } }, Metadata: { acquireCount: { w: 1819 } }, oplog: { acquireCount: { w: 1819 } } } protocol:op_command 7810ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.278-0400 m31100| 2015-07-09T14:16:41.277-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_329 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.347-0400 m31100| 2015-07-09T14:16:41.347-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.354-0400 m31100| 2015-07-09T14:16:41.353-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_326 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.354-0400 m31100| 2015-07-09T14:16:41.354-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_326 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.354-0400 m31101| 2015-07-09T14:16:41.354-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.355-0400 m31102| 2015-07-09T14:16:41.355-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.363-0400 m31100| 2015-07-09T14:16:41.363-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_326 
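Each of these mapReduce completion lines carries a locks: { ... } breakdown because the operation exceeded the default 100 ms slow-operation threshold; the acquireWaitCount/timeAcquiringMicros pairs show the five concurrent workload threads contending for the database lock rather than doing I/O. The same slow operations can be inspected after the fact with the profiler; a sketch against the workload database:

    // Record ops slower than 100 ms (the same threshold the slow-op log uses),
    // then list the slowest recent ones with their lock-wait breakdowns.
    var mrd = db.getSiblingDB("map_reduce_drop");
    mrd.setProfilingLevel(1, 100);
    mrd.system.profile.find().sort({ millis: -1 }).limit(5).pretty();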
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.367-0400 m31100| 2015-07-09T14:16:41.366-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_330 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.371-0400 m31100| 2015-07-09T14:16:41.371-0400 I COMMAND [conn191] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.372-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.372-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.372-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.373-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:56 reslen:179 locks:{ Global: { acquireCount: { r: 9266, w: 5463, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 24092, w: 15241, W: 12856 } }, Database: { acquireCount: { r: 1822, w: 5455, R: 78, W: 10 }, acquireWaitCount: { r: 8, w: 55, R: 78, W: 8 }, timeAcquiringMicros: { r: 25030, w: 149398, R: 122792, W: 62242 } }, Collection: { acquireCount: { r: 1822, w: 3638 } }, Metadata: { acquireCount: { w: 1819 } }, oplog: { acquireCount: { w: 1819 } } } protocol:op_command 7853ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.447-0400 m31100| 2015-07-09T14:16:41.447-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.455-0400 m31100| 2015-07-09T14:16:41.454-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_327 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.455-0400 m31100| 2015-07-09T14:16:41.455-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_327 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.456-0400 m31101| 2015-07-09T14:16:41.455-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.457-0400 m31102| 2015-07-09T14:16:41.456-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.484-0400 m31100| 2015-07-09T14:16:41.484-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_327 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.496-0400 m31100| 2015-07-09T14:16:41.495-0400 I COMMAND [conn68] command map_reduce_drop.$cmd command: insert { insert: "coll68", documents: 250, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 256, w: 256 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 14007 } }, Database: { acquireCount: { w: 256 }, acquireWaitCount: { w: 5 }, timeAcquiringMicros: { w: 53896 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 117ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.502-0400 m31100| 2015-07-09T14:16:41.501-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.502-0400 
m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.502-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.502-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.503-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:51 reslen:179 locks:{ Global: { acquireCount: { r: 9256, w: 5463, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 21836, w: 24947, W: 10754 } }, Database: { acquireCount: { r: 1822, w: 5455, R: 73, W: 10 }, acquireWaitCount: { r: 12, w: 70, R: 71, W: 8 }, timeAcquiringMicros: { r: 30780, w: 242220, R: 71385, W: 89138 } }, Collection: { acquireCount: { r: 1822, w: 3638 } }, Metadata: { acquireCount: { w: 1819 } }, oplog: { acquireCount: { w: 1819 } } } protocol:op_command 7834ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.503-0400 m31100| 2015-07-09T14:16:41.502-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_331 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:41.591-0400 m31100| 2015-07-09T14:16:41.590-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_332 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.533-0400 m31100| 2015-07-09T14:16:43.532-0400 I - [conn184] M/R: (3/3) Final Reduce Progress: 1800/2013 89% [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.862-0400 m31100| 2015-07-09T14:16:43.862-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.868-0400 m31100| 2015-07-09T14:16:43.868-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_328 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.869-0400 m31100| 2015-07-09T14:16:43.868-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_328 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.869-0400 m31101| 2015-07-09T14:16:43.869-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.870-0400 m31100| 2015-07-09T14:16:43.870-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_328 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.870-0400 m31102| 2015-07-09T14:16:43.870-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.872-0400 m31100| 2015-07-09T14:16:43.872-0400 I COMMAND [conn184] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.873-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.873-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.873-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.874-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 10143, w: 6054, W: 3 }, acquireWaitCount: { r: 3, w: 3, W: 1 }, timeAcquiringMicros: { r: 24726, w: 31444, W: 200 } }, Database: { acquireCount: { r: 2019, w: 6046, R: 24, W: 10 }, acquireWaitCount: { 
r: 19, w: 165, R: 24, W: 8 }, timeAcquiringMicros: { r: 129964, w: 1962492, R: 8838, W: 3800 } }, Collection: { acquireCount: { r: 2019, w: 4032 } }, Metadata: { acquireCount: { w: 2016 } }, oplog: { acquireCount: { w: 2016 } } } protocol:op_command 8628ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.875-0400 m30999| 2015-07-09T14:16:43.874-0400 I COMMAND [conn447] DROP: map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.875-0400 m30999| 2015-07-09T14:16:43.874-0400 I COMMAND [conn447] drop going to do passthrough [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.875-0400 m31100| 2015-07-09T14:16:43.875-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.884-0400 m31102| 2015-07-09T14:16:43.884-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.884-0400 m31101| 2015-07-09T14:16:43.884-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:43.982-0400 m31100| 2015-07-09T14:16:43.982-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_333 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.252-0400 m31100| 2015-07-09T14:16:44.251-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.257-0400 m31100| 2015-07-09T14:16:44.256-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_333 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.258-0400 m31100| 2015-07-09T14:16:44.257-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_333 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.258-0400 m31102| 2015-07-09T14:16:44.258-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.259-0400 m31101| 2015-07-09T14:16:44.259-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.260-0400 m31100| 2015-07-09T14:16:44.260-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_333 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.262-0400 m31100| 2015-07-09T14:16:44.261-0400 I COMMAND [conn184] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.262-0400 m31100| emit(this.key, 1); [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.262-0400 m31100| }, reduce: function reducer() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.262-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.263-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 1258, w: 747, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 49 } }, Database: { acquireCount: { r: 250, w: 739, R: 4, W: 10 }, acquireWaitCount: { R: 4, W: 8 }, timeAcquiringMicros: { R: 1456, W: 2931 } }, Collection: { acquireCount: { r: 250, w: 494 } }, Metadata: { acquireCount: { w: 247 } }, oplog: { acquireCount: { w: 247 } } } protocol:op_command 279ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.263-0400 m30999| 
2015-07-09T14:16:44.263-0400 I COMMAND [conn447] DROP: map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.264-0400 m30999| 2015-07-09T14:16:44.263-0400 I COMMAND [conn447] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.264-0400 m31100| 2015-07-09T14:16:44.263-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.271-0400 m31102| 2015-07-09T14:16:44.271-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.274-0400 m31101| 2015-07-09T14:16:44.274-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.365-0400 m31100| 2015-07-09T14:16:44.364-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_334
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.774-0400 m31100| 2015-07-09T14:16:44.774-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.781-0400 m31100| 2015-07-09T14:16:44.781-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_334
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.782-0400 m31100| 2015-07-09T14:16:44.781-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_334
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.783-0400 m31100| 2015-07-09T14:16:44.782-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_334
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.783-0400 m31101| 2015-07-09T14:16:44.783-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.783-0400 m31102| 2015-07-09T14:16:44.783-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.785-0400 m31100| 2015-07-09T14:16:44.784-0400 I COMMAND [conn184] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.785-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.785-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.785-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.786-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 1278, w: 759, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 147 } }, Database: { acquireCount: { r: 254, w: 751, R: 4, W: 10 }, acquireWaitCount: { R: 3, W: 8 }, timeAcquiringMicros: { R: 801, W: 2934 } }, Collection: { acquireCount: { r: 254, w: 502 } }, Metadata: { acquireCount: { w: 251 } }, oplog: { acquireCount: { w: 251 } } } protocol:op_command 420ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:44.858-0400 m31100| 2015-07-09T14:16:44.858-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_335
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.460-0400 m31100| 2015-07-09T14:16:46.460-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.468-0400 m31100| 2015-07-09T14:16:46.467-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_335
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.468-0400 m31100| 2015-07-09T14:16:46.468-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_335
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.469-0400 m31100| 2015-07-09T14:16:46.469-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_335
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.469-0400 m31102| 2015-07-09T14:16:46.469-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.470-0400 m31101| 2015-07-09T14:16:46.470-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.472-0400 m31100| 2015-07-09T14:16:46.471-0400 I COMMAND [conn184] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.472-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.472-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.473-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.473-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 2494, w: 1485, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 41 } }, Database: { acquireCount: { r: 496, w: 1477, R: 7, W: 10 }, acquireWaitCount: { R: 7, W: 6 }, timeAcquiringMicros: { R: 1910, W: 3053 } }, Collection: { acquireCount: { r: 496, w: 986 } }, Metadata: { acquireCount: { w: 493 } }, oplog: { acquireCount: { w: 493 } } } protocol:op_command 1613ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.473-0400 m30999| 2015-07-09T14:16:46.472-0400 I COMMAND [conn447] DROP: map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.473-0400 m30999| 2015-07-09T14:16:46.472-0400 I COMMAND [conn447] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.474-0400 m31100| 2015-07-09T14:16:46.474-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.485-0400 m31102| 2015-07-09T14:16:46.485-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.489-0400 m31101| 2015-07-09T14:16:46.489-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:46.565-0400 m31100| 2015-07-09T14:16:46.565-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_336
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.129-0400 m31100| 2015-07-09T14:16:47.128-0400 I - [conn177] M/R: (3/3) Final Reduce Progress: 900/2760 32%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.156-0400 m31100| 2015-07-09T14:16:47.156-0400 I - [conn185] M/R: (3/3) Final Reduce Progress: 800/2760 28%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.180-0400 m31100| 2015-07-09T14:16:47.180-0400 I - [conn191] M/R: (3/3) Final Reduce Progress: 800/2760 28%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.270-0400 m31100| 2015-07-09T14:16:47.269-0400 I - [conn179] M/R: (3/3) Final Reduce Progress: 800/2760 28%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.334-0400 m31100| 2015-07-09T14:16:47.333-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.341-0400 m31100| 2015-07-09T14:16:47.340-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_336
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.341-0400 m31100| 2015-07-09T14:16:47.341-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_336
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.342-0400 m31100| 2015-07-09T14:16:47.342-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.tmp.mr.coll68_336
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.343-0400 m31101| 2015-07-09T14:16:47.342-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.343-0400 m31102| 2015-07-09T14:16:47.343-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.345-0400 m31100| 2015-07-09T14:16:47.344-0400 I COMMAND [conn184] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.345-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.345-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.345-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.346-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 1273, w: 756, W: 3 } }, Database: { acquireCount: { r: 253, w: 748, R: 4, W: 10 }, acquireWaitCount: { R: 3, W: 7 }, timeAcquiringMicros: { R: 2051, W: 2836 } }, Collection: { acquireCount: { r: 253, w: 500 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 782ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.346-0400 m30999| 2015-07-09T14:16:47.346-0400 I COMMAND [conn447] DROP: map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.346-0400 m30999| 2015-07-09T14:16:47.346-0400 I COMMAND [conn447] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.346-0400 m31100| 2015-07-09T14:16:47.346-0400 I COMMAND [conn184] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.358-0400 m31102| 2015-07-09T14:16:47.355-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.359-0400 m30999| 2015-07-09T14:16:47.357-0400 I NETWORK [conn447] end connection 127.0.0.1:64018 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:47.362-0400 m31101| 2015-07-09T14:16:47.362-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.012-0400 m31100| 2015-07-09T14:16:50.012-0400 I - [conn177] M/R: (3/3) Final Reduce Progress: 2600/2760 94%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.021-0400 m31100| 2015-07-09T14:16:50.020-0400 I - [conn185] M/R: (3/3) Final Reduce Progress: 2500/2760 90%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.035-0400 m31100| 2015-07-09T14:16:50.035-0400 I - [conn191] M/R: (3/3) Final Reduce Progress: 2500/2760 90%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.129-0400 m31100| 2015-07-09T14:16:50.129-0400 I - [conn179] M/R: (3/3) Final Reduce Progress: 2500/2760 90%
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.274-0400 m31100| 2015-07-09T14:16:50.274-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.287-0400 m31100| 2015-07-09T14:16:50.286-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_329
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.287-0400 m31100| 2015-07-09T14:16:50.287-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_329
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.289-0400 m31100| 2015-07-09T14:16:50.288-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_329
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.290-0400 m31101| 2015-07-09T14:16:50.289-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.291-0400 m31100| 2015-07-09T14:16:50.290-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.291-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.291-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.292-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.292-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:62 reslen:179 locks:{ Global: { acquireCount: { r: 14022, w: 8295, W: 3 }, acquireWaitCount: { r: 2, w: 5, W: 1 }, timeAcquiringMicros: { r: 16169, w: 33525, W: 51 } }, Database: { acquireCount: { r: 2766, w: 8287, R: 96, W: 10 }, acquireWaitCount: { r: 8, w: 103, R: 95, W: 8 }, timeAcquiringMicros: { r: 19505, w: 856665, R: 274946, W: 14227 } }, Collection: { acquireCount: { r: 2766, w: 5526 } }, Metadata: { acquireCount: { w: 2763 } }, oplog: { acquireCount: { w: 2763 } } } protocol:op_command 9014ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.294-0400 m31102| 2015-07-09T14:16:50.294-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.389-0400 m31100| 2015-07-09T14:16:50.389-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_337
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.458-0400 m31100| 2015-07-09T14:16:50.458-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.464-0400 m31100| 2015-07-09T14:16:50.463-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_331
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.464-0400 m31100| 2015-07-09T14:16:50.463-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_331
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.464-0400 m31100| 2015-07-09T14:16:50.464-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_331
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.465-0400 m31100| 2015-07-09T14:16:50.464-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.467-0400 m31102| 2015-07-09T14:16:50.466-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.468-0400 m31101| 2015-07-09T14:16:50.467-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.471-0400 m31100| 2015-07-09T14:16:50.470-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_332
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.471-0400 m31100| 2015-07-09T14:16:50.470-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_332
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.473-0400 m31100| 2015-07-09T14:16:50.473-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_332
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.473-0400 m31100| 2015-07-09T14:16:50.473-0400 I COMMAND [conn191] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.473-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.473-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.474-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.474-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:89 reslen:179 locks:{ Global: { acquireCount: { r: 14076, w: 8295, W: 3 }, acquireWaitCount: { r: 1, w: 6, W: 1 }, timeAcquiringMicros: { r: 13182, w: 39901, W: 122 } }, Database: { acquireCount: { r: 2766, w: 8287, R: 123, W: 10 }, acquireWaitCount: { r: 12, w: 61, R: 122, W: 8 }, timeAcquiringMicros: { r: 58129, w: 209288, R: 152203, W: 54594 } }, Collection: { acquireCount: { r: 2766, w: 5526 } }, Metadata: { acquireCount: { w: 2763 } }, oplog: { acquireCount: { w: 2763 } } } protocol:op_command 8977ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.475-0400 m30999| 2015-07-09T14:16:50.475-0400 I COMMAND [conn448] DROP: map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.476-0400 m30999| 2015-07-09T14:16:50.475-0400 I COMMAND [conn448] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.476-0400 m31100| 2015-07-09T14:16:50.475-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.476-0400 m31102| 2015-07-09T14:16:50.476-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.479-0400 m31101| 2015-07-09T14:16:50.479-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.483-0400 m31100| 2015-07-09T14:16:50.482-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.483-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.483-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.484-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.484-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:84 reslen:179 locks:{ Global: { acquireCount: { r: 14066, w: 8295, W: 3 }, acquireWaitCount: { r: 3, w: 4, W: 1 }, timeAcquiringMicros: { r: 27147, w: 25684, W: 678 } }, Database: { acquireCount: { r: 2766, w: 8287, R: 118, W: 10 }, acquireWaitCount: { r: 11, w: 61, R: 115, W: 8 }, timeAcquiringMicros: { r: 47436, w: 265374, R: 121800, W: 65483 } }, Collection: { acquireCount: { r: 2766, w: 5526 } }, Metadata: { acquireCount: { w: 2763 } }, oplog: { acquireCount: { w: 2763 } } } protocol:op_command 8914ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.492-0400 m31102| 2015-07-09T14:16:50.492-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.496-0400 m31101| 2015-07-09T14:16:50.496-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.552-0400 m31100| 2015-07-09T14:16:50.552-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.559-0400 m31100| 2015-07-09T14:16:50.559-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_330
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.560-0400 m31100| 2015-07-09T14:16:50.560-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_330
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.564-0400 m31102| 2015-07-09T14:16:50.564-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.568-0400 m31101| 2015-07-09T14:16:50.568-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.595-0400 m31100| 2015-07-09T14:16:50.595-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_330
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.604-0400 m31100| 2015-07-09T14:16:50.603-0400 I COMMAND [conn68] command map_reduce_drop.$cmd command: insert { insert: "coll68", documents: 250, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 258, w: 258 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 7743 } }, Database: { acquireCount: { w: 257, W: 1 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 812, W: 65 } }, Collection: { acquireCount: { w: 6, W: 1 } }, Metadata: { acquireCount: { w: 251 } }, oplog: { acquireCount: { w: 251 } } } protocol:op_command 114ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.608-0400 m31100| 2015-07-09T14:16:50.607-0400 I COMMAND [conn25] command map_reduce_drop.$cmd command: insert { insert: "coll68", documents: 250, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 0|0, ObjectId('000000000000000000000000') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 256, w: 256 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 8098 } }, Database: { acquireCount: { w: 256 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 15463 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 114ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.620-0400 m31100| 2015-07-09T14:16:50.619-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.620-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.620-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.620-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.621-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:87 reslen:179 locks:{ Global: { acquireCount: { r: 14072, w: 8295, W: 3 }, acquireWaitCount: { r: 3, w: 6, W: 1 }, timeAcquiringMicros: { r: 30035, w: 41896, W: 3157 } }, Database: { acquireCount: { r: 2766, w: 8287, R: 121, W: 10 }, acquireWaitCount: { r: 16, w: 80, R: 121, W: 8 }, timeAcquiringMicros: { r: 36466, w: 333664, R: 242086, W: 67047 } }, Collection: { acquireCount: { r: 2766, w: 5526 } }, Metadata: { acquireCount: { w: 2763 } }, oplog: { acquireCount: { w: 2763 } } } protocol:op_command 9259ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.621-0400 m31100| 2015-07-09T14:16:50.620-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_338
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.621-0400 m31100| 2015-07-09T14:16:50.620-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_339
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.736-0400 m31100| 2015-07-09T14:16:50.736-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_340
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.884-0400 m31100| 2015-07-09T14:16:50.884-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.891-0400 m31100| 2015-07-09T14:16:50.890-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_337
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.891-0400 m31100| 2015-07-09T14:16:50.891-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_337
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.892-0400 m31101| 2015-07-09T14:16:50.892-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.893-0400 m31100| 2015-07-09T14:16:50.892-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_337
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.894-0400 m31100| 2015-07-09T14:16:50.893-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.895-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.895-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.895-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.896-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 1273, w: 756, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 23508, W: 111 } }, Database: { acquireCount: { r: 253, w: 748, R: 4, W: 10 }, acquireWaitCount: { r: 6, w: 30, R: 2, W: 6 }, timeAcquiringMicros: { r: 79663, w: 167646, R: 2210, W: 1581 } }, Collection: { acquireCount: { r: 253, w: 500 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 507ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.896-0400 m31102| 2015-07-09T14:16:50.893-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:50.967-0400 m31100| 2015-07-09T14:16:50.967-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_341
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.104-0400 m31100| 2015-07-09T14:16:52.103-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.112-0400 m31100| 2015-07-09T14:16:52.112-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_339
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.113-0400 m31100| 2015-07-09T14:16:52.112-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_339
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.114-0400 m31100| 2015-07-09T14:16:52.113-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_339
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.114-0400 m31102| 2015-07-09T14:16:52.113-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.116-0400 m31100| 2015-07-09T14:16:52.115-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.116-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.117-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.117-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.117-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3688, w: 2199, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7471, W: 467 } }, Database: { acquireCount: { r: 734, w: 2191, R: 9, W: 10 }, acquireWaitCount: { w: 32, R: 9, W: 7 }, timeAcquiringMicros: { w: 94575, R: 25185, W: 59434 } }, Collection: { acquireCount: { r: 734, w: 1462 } }, Metadata: { acquireCount: { w: 731 } }, oplog: { acquireCount: { w: 731 } } } protocol:op_command 1505ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.117-0400 m30998| 2015-07-09T14:16:52.116-0400 I COMMAND [conn447] DROP: map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.117-0400 m30998| 2015-07-09T14:16:52.116-0400 I COMMAND [conn447] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.117-0400 m31100| 2015-07-09T14:16:52.117-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.118-0400 m31101| 2015-07-09T14:16:52.118-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.121-0400 m31100| 2015-07-09T14:16:52.121-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.127-0400 m31102| 2015-07-09T14:16:52.127-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.131-0400 m31100| 2015-07-09T14:16:52.131-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_340
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.131-0400 m31100| 2015-07-09T14:16:52.131-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_340
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.134-0400 m31101| 2015-07-09T14:16:52.133-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.141-0400 m31101| 2015-07-09T14:16:52.140-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.145-0400 m31102| 2015-07-09T14:16:52.144-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.173-0400 m31100| 2015-07-09T14:16:52.173-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_340
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.199-0400 m31100| 2015-07-09T14:16:52.198-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.203-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.203-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.203-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.204-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3688, w: 2199, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 9564, w: 7229, W: 40 } }, Database: { acquireCount: { r: 734, w: 2191, R: 9, W: 10 }, acquireWaitCount: { r: 2, w: 24, R: 9, W: 8 }, timeAcquiringMicros: { r: 2141, w: 66313, R: 10828, W: 78328 } }, Collection: { acquireCount: { r: 734, w: 1462 } }, Metadata: { acquireCount: { w: 731 } }, oplog: { acquireCount: { w: 731 } } } protocol:op_command 1470ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.204-0400 m31100| 2015-07-09T14:16:52.201-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_342
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.277-0400 m31100| 2015-07-09T14:16:52.276-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_343
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.353-0400 m31100| 2015-07-09T14:16:52.352-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.361-0400 m31100| 2015-07-09T14:16:52.360-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_338
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.361-0400 m31100| 2015-07-09T14:16:52.361-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_338
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.363-0400 m31102| 2015-07-09T14:16:52.362-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.363-0400 m31100| 2015-07-09T14:16:52.363-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_338
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.364-0400 m31101| 2015-07-09T14:16:52.364-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.366-0400 m31100| 2015-07-09T14:16:52.366-0400 I COMMAND [conn191] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.366-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.367-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.367-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.368-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3688, w: 2199, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 239, w: 26864, W: 300 } }, Database: { acquireCount: { r: 734, w: 2191, R: 9, W: 10 }, acquireWaitCount: { r: 10, w: 63, R: 9, W: 8 }, timeAcquiringMicros: { r: 101897, w: 227580, R: 24638, W: 14242 } }, Collection: { acquireCount: { r: 734, w: 1462 } }, Metadata: { acquireCount: { w: 731 } }, oplog: { acquireCount: { w: 731 } } } protocol:op_command 1761ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.471-0400 m31100| 2015-07-09T14:16:52.471-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_344
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.535-0400 m31100| 2015-07-09T14:16:52.535-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.542-0400 m31100| 2015-07-09T14:16:52.541-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_341
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.542-0400 m31100| 2015-07-09T14:16:52.542-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_341
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.543-0400 m31100| 2015-07-09T14:16:52.543-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_341
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.544-0400 m31101| 2015-07-09T14:16:52.544-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.546-0400 m31100| 2015-07-09T14:16:52.546-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.547-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.547-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.547-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.547-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 4884, w: 2913, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 18239, w: 9910, W: 77 } }, Database: { acquireCount: { r: 972, w: 2905, R: 12, W: 10 }, acquireWaitCount: { r: 11, w: 48, R: 12, W: 8 }, timeAcquiringMicros: { r: 82976, w: 151428, R: 6358, W: 1539 } }, Collection: { acquireCount: { r: 972, w: 1938 } }, Metadata: { acquireCount: { w: 969 } }, oplog: { acquireCount: { w: 969 } } } protocol:op_command 1579ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.549-0400 m31102| 2015-07-09T14:16:52.549-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.650-0400 m31100| 2015-07-09T14:16:52.650-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_345
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.892-0400 m31100| 2015-07-09T14:16:52.892-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.903-0400 m31100| 2015-07-09T14:16:52.903-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_343
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.903-0400 m31100| 2015-07-09T14:16:52.903-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_343
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.904-0400 m31100| 2015-07-09T14:16:52.904-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_343
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.905-0400 m31102| 2015-07-09T14:16:52.905-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.906-0400 m31100| 2015-07-09T14:16:52.905-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.906-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.906-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.906-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.907-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 2489, w: 1482, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 6768, w: 8090, W: 101 } }, Database: { acquireCount: { r: 495, w: 1474, R: 7, W: 10 }, acquireWaitCount: { r: 3, w: 35, R: 6, W: 7 }, timeAcquiringMicros: { r: 317, w: 100967, R: 4515, W: 8498 } }, Collection: { acquireCount: { r: 495, w: 984 } }, Metadata: { acquireCount: { w: 492 } }, oplog: { acquireCount: { w: 492 } } } protocol:op_command 631ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.907-0400 m31101| 2015-07-09T14:16:52.906-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.907-0400 m30999| 2015-07-09T14:16:52.906-0400 I COMMAND [conn449] DROP: map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.907-0400 m30999| 2015-07-09T14:16:52.906-0400 I COMMAND [conn449] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.908-0400 m31100| 2015-07-09T14:16:52.906-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.915-0400 m31102| 2015-07-09T14:16:52.915-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.929-0400 m31101| 2015-07-09T14:16:52.928-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.959-0400 m31100| 2015-07-09T14:16:52.958-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.967-0400 m31100| 2015-07-09T14:16:52.966-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_342
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.967-0400 m31100| 2015-07-09T14:16:52.967-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_342
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.974-0400 m31101| 2015-07-09T14:16:52.973-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:52.982-0400 m31102| 2015-07-09T14:16:52.981-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.004-0400 m31100| 2015-07-09T14:16:53.004-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_342
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.011-0400 m31100| 2015-07-09T14:16:53.011-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.011-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.011-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.011-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.012-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 2489, w: 1482, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 17878, w: 8338, W: 7520 } }, Database: { acquireCount: { r: 495, w: 1474, R: 7, W: 10 }, acquireWaitCount: { r: 6, w: 49, R: 7, W: 5 }, timeAcquiringMicros: { r: 32220, w: 129429, R: 37577, W: 47151 } }, Collection: { acquireCount: { r: 495, w: 984 } }, Metadata: { acquireCount: { w: 492 } }, oplog: { acquireCount: { w: 492 } } } protocol:op_command 811ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.013-0400 m31100| 2015-07-09T14:16:53.013-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_346
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.092-0400 m31100| 2015-07-09T14:16:53.092-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_347
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.732-0400 m31100| 2015-07-09T14:16:53.731-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.739-0400 m31100| 2015-07-09T14:16:53.738-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_344
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.739-0400 m31100| 2015-07-09T14:16:53.738-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_344
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.740-0400 m31100| 2015-07-09T14:16:53.740-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_344
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.740-0400 m31101| 2015-07-09T14:16:53.740-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.741-0400 m31102| 2015-07-09T14:16:53.741-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.743-0400 m31100| 2015-07-09T14:16:53.743-0400 I COMMAND [conn191] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.744-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.744-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.744-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.745-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3668, w: 2187, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 27176, w: 6852 } }, Database: { acquireCount: { r: 730, w: 2179, R: 9, W: 10 }, acquireWaitCount: { r: 10, w: 72, R: 9, W: 8 }, timeAcquiringMicros: { r: 80655, w: 132253, R: 6962, W: 3998 } }, Collection: { acquireCount: { r: 730, w: 1454 } }, Metadata: { acquireCount: { w: 727 } }, oplog: { acquireCount: { w: 727 } } } protocol:op_command 1273ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.836-0400 m31100| 2015-07-09T14:16:53.836-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_348
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.935-0400 m31100| 2015-07-09T14:16:53.935-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.942-0400 m31100| 2015-07-09T14:16:53.941-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_346
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.942-0400 m31100| 2015-07-09T14:16:53.941-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_346
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.942-0400 m31100| 2015-07-09T14:16:53.942-0400 I COMMAND [conn179] CMD: drop map_reduce_drop.tmp.mr.coll68_346
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.943-0400 m31102| 2015-07-09T14:16:53.943-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.946-0400 m31100| 2015-07-09T14:16:53.944-0400 I COMMAND [conn179] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.946-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.946-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.946-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.946-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 2494, w: 1485, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 4890, W: 119 } }, Database: { acquireCount: { r: 496, w: 1477, R: 7, W: 10 }, acquireWaitCount: { r: 4, w: 26, R: 7, W: 6 }, timeAcquiringMicros: { r: 17834, w: 70006, R: 45434, W: 8613 } }, Collection: { acquireCount: { r: 496, w: 986 } }, Metadata: { acquireCount: { w: 493 } }, oplog: { acquireCount: { w: 493 } } } protocol:op_command 932ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.947-0400 m31100| 2015-07-09T14:16:53.946-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.947-0400 m30999| 2015-07-09T14:16:53.947-0400 I NETWORK [conn449] end connection 127.0.0.1:64020 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.947-0400 m31101| 2015-07-09T14:16:53.947-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.951-0400 m31100| 2015-07-09T14:16:53.951-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_347
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.951-0400 m31100| 2015-07-09T14:16:53.951-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_347
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.952-0400 m31100| 2015-07-09T14:16:53.952-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_347
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.953-0400 m31100| 2015-07-09T14:16:53.953-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.953-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.954-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.954-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.955-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 2494, w: 1485, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 11200, W: 206 } }, Database: { acquireCount: { r: 496, w: 1477, R: 7, W: 10 }, acquireWaitCount: { r: 6, w: 17, R: 7, W: 8 }, timeAcquiringMicros: { r: 10193, w: 40927, R: 6133, W: 5446 } }, Collection: { acquireCount: { r: 496, w: 986 } }, Metadata: { acquireCount: { w: 493 } }, oplog: { acquireCount: { w: 493 } } } protocol:op_command 862ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.960-0400 m31102| 2015-07-09T14:16:53.960-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:53.963-0400 m31101| 2015-07-09T14:16:53.963-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.031-0400 m31100| 2015-07-09T14:16:54.031-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_349
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.032-0400 m31100| 2015-07-09T14:16:54.032-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.040-0400 m31100| 2015-07-09T14:16:54.040-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_345
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.040-0400 m31100| 2015-07-09T14:16:54.040-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_345
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.041-0400 m31100| 2015-07-09T14:16:54.041-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_345
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.043-0400 m31101| 2015-07-09T14:16:54.042-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.044-0400 m31102| 2015-07-09T14:16:54.043-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.072-0400 m31100| 2015-07-09T14:16:54.072-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.073-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.073-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.073-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.074-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 4819, w: 2874, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 29138, w: 14991, W: 433 } }, Database: { acquireCount: { r: 959, w: 2866, R: 12, W: 10 }, acquireWaitCount: { r: 17, w: 71, R: 12, W: 8 }, timeAcquiringMicros: { r: 68376, w: 148849, R: 5789, W: 32532 } }, Collection: { acquireCount: { r: 959, w: 1912 } }, Metadata: { acquireCount: { w: 956 } }, oplog: { acquireCount: { w: 956 } } } protocol:op_command 1422ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.139-0400 m31100| 2015-07-09T14:16:54.139-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_350
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.610-0400 m31100| 2015-07-09T14:16:54.610-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.616-0400 m31100| 2015-07-09T14:16:54.616-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_348
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.617-0400 m31100| 2015-07-09T14:16:54.617-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_348
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.618-0400 m31100| 2015-07-09T14:16:54.617-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_348
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.618-0400 m31101| 2015-07-09T14:16:54.618-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.619-0400 m31102| 2015-07-09T14:16:54.618-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.620-0400 m31100| 2015-07-09T14:16:54.619-0400 I COMMAND [conn191] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.620-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.620-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.620-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.620-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 3673, w: 2190, W: 3 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 22089 } }, Database: { acquireCount: { r: 731, w: 2182, R: 9, W: 10 }, acquireWaitCount: { w: 40, R: 8, W: 6 }, timeAcquiringMicros: { w: 164982, R: 4267, W: 2426 } }, Collection: { acquireCount: { r: 731, w: 1456 } }, Metadata: { acquireCount: { w: 728 } }, oplog: { acquireCount: { w: 728 } } } protocol:op_command 786ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:54.717-0400 m31100| 2015-07-09T14:16:54.717-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_351
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.147-0400 m31100| 2015-07-09T14:16:55.147-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.156-0400 m31100| 2015-07-09T14:16:55.156-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_349
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.157-0400 m31100| 2015-07-09T14:16:55.157-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_349
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.158-0400 m31100| 2015-07-09T14:16:55.158-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_349
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.158-0400 m31102| 2015-07-09T14:16:55.158-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.159-0400 m31101| 2015-07-09T14:16:55.159-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.161-0400 m31100| 2015-07-09T14:16:55.159-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.161-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.161-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.161-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.162-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 5928, w: 3537, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 15401, W: 39 } }, Database: { acquireCount: { r: 1180, w: 3529, R: 14, W: 10 }, acquireWaitCount: { r: 5, w: 35, R: 14, W: 8 }, timeAcquiringMicros: { r: 9501, w: 94849, R: 55590, W: 2298 } }, Collection: { acquireCount: { r: 1180, w: 2354 } }, Metadata: { acquireCount: { w: 1177 } }, oplog: { acquireCount: { w: 1177 } } } protocol:op_command 1129ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.187-0400 m31100| 2015-07-09T14:16:55.186-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.194-0400 m31100| 2015-07-09T14:16:55.194-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_350
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.194-0400 m31100| 2015-07-09T14:16:55.194-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_350
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.195-0400 m31100| 2015-07-09T14:16:55.194-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_350
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.200-0400 m31101| 2015-07-09T14:16:55.200-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.204-0400 m31102| 2015-07-09T14:16:55.204-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.230-0400 m31100| 2015-07-09T14:16:55.229-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.230-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.231-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.231-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.232-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 5928, w: 3537, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 9657, w: 6556, W: 4256 } }, Database: { acquireCount: { r: 1180, w: 3529, R: 14, W: 10 }, acquireWaitCount: { r: 6, w: 22, R: 14, W: 6 }, timeAcquiringMicros: { r: 10135, w: 58865, R: 5008, W: 45035 } }, Collection: { acquireCount: { r: 1180, w: 2354 } }, Metadata: { acquireCount: { w: 1177 } }, oplog: { acquireCount: { w: 1177 } } } protocol:op_command 1092ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.261-0400 m31100| 2015-07-09T14:16:55.261-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_352
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:55.324-0400 m31100| 2015-07-09T14:16:55.323-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_353
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.079-0400 m31100| 2015-07-09T14:16:56.078-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.086-0400 m31100| 2015-07-09T14:16:56.086-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_351
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.087-0400 m31100| 2015-07-09T14:16:56.086-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_351
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.088-0400 m31100| 2015-07-09T14:16:56.087-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.tmp.mr.coll68_351
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.088-0400 m31101| 2015-07-09T14:16:56.088-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.088-0400 m31102| 2015-07-09T14:16:56.088-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.091-0400 m31100| 2015-07-09T14:16:56.090-0400 I COMMAND [conn191] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.091-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.091-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.092-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.093-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 6994, w: 4173, W: 3 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 21690 } }, Database: { acquireCount: { r: 1392, w: 4165, R: 17, W: 10 }, acquireWaitCount: { r: 8, w: 32, R: 16, W: 7 }, timeAcquiringMicros: { r: 95926, w: 183037, R: 5462, W: 2106 } }, Collection: { acquireCount: { r: 1392, w: 2778 } }, Metadata: { acquireCount: { w: 1389 } }, oplog: { acquireCount: { w: 1389 } } } protocol:op_command 1375ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.093-0400 m30999| 2015-07-09T14:16:56.092-0400 I COMMAND [conn448] DROP: map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.093-0400 m30999| 2015-07-09T14:16:56.092-0400 I COMMAND [conn448] drop going to do passthrough
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.093-0400 m31100| 2015-07-09T14:16:56.092-0400 I COMMAND [conn191] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.100-0400 m31102| 2015-07-09T14:16:56.100-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.103-0400 m30999| 2015-07-09T14:16:56.102-0400 I NETWORK [conn448] end connection 127.0.0.1:64019 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.109-0400 m31101| 2015-07-09T14:16:56.109-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_drop.coll68
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.672-0400 m31100| 2015-07-09T14:16:56.672-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.678-0400 m31100| 2015-07-09T14:16:56.678-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_352
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.678-0400 m31100| 2015-07-09T14:16:56.678-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_352
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.678-0400 m31100| 2015-07-09T14:16:56.678-0400 I COMMAND [conn177] CMD: drop map_reduce_drop.tmp.mr.coll68_352
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.679-0400 m31100| 2015-07-09T14:16:56.679-0400 I COMMAND [conn177] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.679-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.679-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.680-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.680-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 9104, w: 5433, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 7446, W: 42 } }, Database: { acquireCount: { r: 1812, w: 5425, R: 22, W: 10 }, acquireWaitCount: { r: 3, w: 10, R: 22, W: 6 }, timeAcquiringMicros: { r: 2418, w: 27136, R: 102544, W: 21347 } }, Collection: { acquireCount: { r: 1812, w: 3618 } }, Metadata: { acquireCount: { w: 1809 } }, oplog: { acquireCount: { w: 1809 } } } protocol:op_command 1435ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.680-0400 m31102| 2015-07-09T14:16:56.680-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.683-0400 m30998| 2015-07-09T14:16:56.683-0400 I NETWORK [conn447] end connection 127.0.0.1:64016 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.684-0400 m31101| 2015-07-09T14:16:56.683-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.689-0400 m31100| 2015-07-09T14:16:56.689-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.696-0400 m31100| 2015-07-09T14:16:56.696-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_353
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.696-0400 m31100| 2015-07-09T14:16:56.696-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_353
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.700-0400 m31100| 2015-07-09T14:16:56.696-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_353
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.701-0400 m31101| 2015-07-09T14:16:56.697-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.702-0400 m31102| 2015-07-09T14:16:56.698-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.707-0400 m31100| 2015-07-09T14:16:56.707-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.707-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.707-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.708-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.708-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 9104, w: 5433, W: 3 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 13666 } }, Database: { acquireCount: { r: 1812, w: 5425, R: 22, W: 10 }, acquireWaitCount: { r: 5, w: 7, R: 22, W: 4 }, timeAcquiringMicros: { r: 16834, w: 13648, R: 35188, W: 16206 } }, Collection: { acquireCount: { r: 1812, w: 3618 } }, Metadata: { acquireCount: { w: 1809 } }, oplog: { acquireCount: { w: 1809 } } } protocol:op_command 1422ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.771-0400 m31100| 2015-07-09T14:16:56.770-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_354
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.894-0400 m31100| 2015-07-09T14:16:56.894-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.902-0400 m31100| 2015-07-09T14:16:56.902-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_354
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.903-0400 m31100| 2015-07-09T14:16:56.903-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_354
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.904-0400 m31100| 2015-07-09T14:16:56.903-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_354
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.905-0400 m31100| 2015-07-09T14:16:56.904-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.905-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.905-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.905-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.906-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 1273, w: 756, W: 3 } }, Database: { acquireCount: { r: 253, w: 748, R: 4, W: 10 } }, Collection: { acquireCount: { r: 253, w: 500 } }, Metadata: { acquireCount: { w: 250 } }, oplog: { acquireCount: { w: 250 } } } protocol:op_command 133ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.908-0400 m31101| 2015-07-09T14:16:56.908-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.909-0400 m31102| 2015-07-09T14:16:56.908-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:56.955-0400 m31100| 2015-07-09T14:16:56.954-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_355
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.137-0400 m31100| 2015-07-09T14:16:57.137-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.142-0400 m31100| 2015-07-09T14:16:57.142-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_355
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.143-0400 m31100| 2015-07-09T14:16:57.142-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_355
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.143-0400 m31100| 2015-07-09T14:16:57.143-0400 I COMMAND [conn185] CMD: drop map_reduce_drop.tmp.mr.coll68_355
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.144-0400 m31100| 2015-07-09T14:16:57.143-0400 I COMMAND [conn185] command map_reduce_drop.coll68_out command: mapReduce { mapreduce: "coll68", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.144-0400 m31100| emit(this.key, 1);
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.144-0400 m31100| }, reduce: function reducer() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.145-0400 m31100| // This dummy reducer is present to e..., finalize: function finalize(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.145-0400 m31100| return redu..., out: "coll68_out" } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:179 locks:{ Global: { acquireCount: { r: 2504, w: 1491, W: 3 } }, Database: { acquireCount: { r: 498, w: 1483, R: 7, W: 10 } }, Collection: { acquireCount: { r: 498, w: 990 } }, Metadata: { acquireCount: { w: 495 } }, oplog: { acquireCount: { w: 495 } } } protocol:op_command 189ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.145-0400 m31101| 2015-07-09T14:16:57.144-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.146-0400 m31102| 2015-07-09T14:16:57.145-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_drop.coll68_out
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.150-0400 m30998| 2015-07-09T14:16:57.149-0400 I NETWORK [conn448] end connection 127.0.0.1:64017 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.173-0400 m30999| 2015-07-09T14:16:57.172-0400 I COMMAND [conn1] DROP DATABASE: map_reduce_drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.173-0400 m30999| 2015-07-09T14:16:57.172-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.173-0400 m30999| 2015-07-09T14:16:57.172-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:57.172-0400-559eba99ca4787b9985d1ec4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465817172), what: "dropDatabase.start", ns: "map_reduce_drop", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.279-0400 m30999| 2015-07-09T14:16:57.279-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_drop dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.280-0400 m31100| 2015-07-09T14:16:57.279-0400 I COMMAND [conn160] dropDatabase map_reduce_drop starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.283-0400 m31100| 2015-07-09T14:16:57.283-0400 I COMMAND [conn160] dropDatabase map_reduce_drop finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.284-0400 m30999| 2015-07-09T14:16:57.283-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:57.283-0400-559eba99ca4787b9985d1ec5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465817283), what: "dropDatabase", ns: "map_reduce_drop", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.284-0400 m31101| 2015-07-09T14:16:57.284-0400 I COMMAND [repl writer worker 4] dropDatabase map_reduce_drop starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.285-0400 m31102| 2015-07-09T14:16:57.285-0400 I COMMAND [repl writer worker 8] dropDatabase map_reduce_drop starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.286-0400 m31101| 2015-07-09T14:16:57.286-0400 I COMMAND [repl writer worker 4] dropDatabase map_reduce_drop finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.286-0400 m31102| 2015-07-09T14:16:57.286-0400 I COMMAND [repl writer worker 8] dropDatabase map_reduce_drop finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.337-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.337-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.337-0400 ----
[js_test:fsm_all_sharded_replication]
2015-07-09T14:16:57.337-0400 jstests/concurrency/fsm_workloads/map_reduce_drop.js: Workload completed in 27365 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.338-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.338-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.338-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.338-0400 m30999| 2015-07-09T14:16:57.337-0400 I COMMAND [conn1] DROP: db68.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.338-0400 m30999| 2015-07-09T14:16:57.337-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:57.337-0400-559eba99ca4787b9985d1ec6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465817337), what: "dropCollection.start", ns: "db68.coll68", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.395-0400 m30999| 2015-07-09T14:16:57.394-0400 I SHARDING [conn1] distributed lock 'db68.coll68/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba99ca4787b9985d1ec7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.396-0400 m31100| 2015-07-09T14:16:57.396-0400 I COMMAND [conn187] CMD: drop db68.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.399-0400 m31200| 2015-07-09T14:16:57.399-0400 I COMMAND [conn18] CMD: drop db68.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.400-0400 m31102| 2015-07-09T14:16:57.400-0400 I COMMAND [repl writer worker 12] CMD: drop db68.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.401-0400 m31101| 2015-07-09T14:16:57.400-0400 I COMMAND [repl writer worker 9] CMD: drop db68.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.403-0400 m31202| 2015-07-09T14:16:57.403-0400 I COMMAND [repl writer worker 10] CMD: drop db68.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.403-0400 m31201| 2015-07-09T14:16:57.403-0400 I COMMAND [repl writer worker 9] CMD: drop db68.coll68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.457-0400 m31100| 2015-07-09T14:16:57.456-0400 I SHARDING [conn187] remotely refreshing metadata for db68.coll68 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba7dca4787b9985d1ebe, current metadata version is 2|3||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.459-0400 m31100| 2015-07-09T14:16:57.458-0400 W SHARDING [conn187] no chunks found when reloading db68.coll68, previous version was 0|0||559eba7dca4787b9985d1ebe, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.459-0400 m31100| 2015-07-09T14:16:57.458-0400 I SHARDING [conn187] dropping metadata for db68.coll68 at shard version 2|3||559eba7dca4787b9985d1ebe, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.460-0400 m31200| 2015-07-09T14:16:57.460-0400 I SHARDING [conn18] remotely refreshing metadata for db68.coll68 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba7dca4787b9985d1ebe, current metadata version is 2|5||559eba7dca4787b9985d1ebe [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.462-0400 m31200| 2015-07-09T14:16:57.461-0400 W SHARDING [conn18] no chunks found when reloading db68.coll68, previous version was 0|0||559eba7dca4787b9985d1ebe, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.462-0400 m31200| 2015-07-09T14:16:57.462-0400 I SHARDING [conn18] dropping metadata for db68.coll68 at shard version 
2|5||559eba7dca4787b9985d1ebe, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.463-0400 m30999| 2015-07-09T14:16:57.463-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:57.463-0400-559eba99ca4787b9985d1ec8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465817463), what: "dropCollection", ns: "db68.coll68", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.517-0400 m30999| 2015-07-09T14:16:57.516-0400 I SHARDING [conn1] distributed lock 'db68.coll68/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.573-0400 m30999| 2015-07-09T14:16:57.572-0400 I COMMAND [conn1] DROP DATABASE: db68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.573-0400 m30999| 2015-07-09T14:16:57.573-0400 I SHARDING [conn1] DBConfig::dropDatabase: db68 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.573-0400 m30999| 2015-07-09T14:16:57.573-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:57.573-0400-559eba99ca4787b9985d1ec9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465817573), what: "dropDatabase.start", ns: "db68", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.680-0400 m30999| 2015-07-09T14:16:57.679-0400 I SHARDING [conn1] DBConfig::dropDatabase: db68 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.680-0400 m31100| 2015-07-09T14:16:57.680-0400 I COMMAND [conn160] dropDatabase db68 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.680-0400 m31100| 2015-07-09T14:16:57.680-0400 I COMMAND [conn160] dropDatabase db68 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.681-0400 m30999| 2015-07-09T14:16:57.680-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:57.680-0400-559eba99ca4787b9985d1eca", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465817680), what: "dropDatabase", ns: "db68", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.681-0400 m31101| 2015-07-09T14:16:57.681-0400 I COMMAND [repl writer worker 0] dropDatabase db68 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.681-0400 m31101| 2015-07-09T14:16:57.681-0400 I COMMAND [repl writer worker 0] dropDatabase db68 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.681-0400 m31102| 2015-07-09T14:16:57.681-0400 I COMMAND [repl writer worker 15] dropDatabase db68 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.682-0400 m31102| 2015-07-09T14:16:57.681-0400 I COMMAND [repl writer worker 15] dropDatabase db68 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.776-0400 m31100| 2015-07-09T14:16:57.776-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.780-0400 m31101| 2015-07-09T14:16:57.780-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.780-0400 m31102| 2015-07-09T14:16:57.780-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.816-0400 m31200| 2015-07-09T14:16:57.816-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.819-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.819-0400 
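For reference, a minimal sketch of the map_reduce_drop workload whose output ends above, assuming the usual FSM-workload shape: one thread runs mapReduce into coll68_out while other threads drop the collection and database out from under it. The reduce and finalize bodies are truncated by the server's command logging, so their contents below are assumptions; only emit(this.key, 1) appears verbatim in the log.

var mrDB = db.getSiblingDB("map_reduce_drop");
// The invocation behind the "command map_reduce_drop.coll68_out" entries above:
mrDB.runCommand({
    mapreduce: "coll68",
    map: function mapper() {
        emit(this.key, 1);                  // shown verbatim in the log
    },
    reduce: function reducer() {
        // "This dummy reducer is present to e..." -- body elided in the log
    },
    finalize: function finalize(key, reducedValue) {
        return reducedValue;                // assumption; log shows only "return redu..."
    },
    out: "coll68_out"
});
// Concurrently, other threads issue the drops that interleave above:
mrDB.coll68.drop();                         // "CMD: drop map_reduce_drop.coll68"
mrDB.dropDatabase();                        // "DROP DATABASE: map_reduce_drop"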
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.819-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.819-0400 jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.819-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.819-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.820-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.820-0400 m31202| 2015-07-09T14:16:57.819-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.820-0400 m31201| 2015-07-09T14:16:57.819-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.826-0400 m30999| 2015-07-09T14:16:57.826-0400 I SHARDING [conn1] distributed lock 'db69/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba99ca4787b9985d1ecb
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.830-0400 m30999| 2015-07-09T14:16:57.829-0400 I SHARDING [conn1] Placing [db69] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.830-0400 m30999| 2015-07-09T14:16:57.829-0400 I SHARDING [conn1] Enabling sharding for database [db69] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.885-0400 m30999| 2015-07-09T14:16:57.884-0400 I SHARDING [conn1] distributed lock 'db69/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.907-0400 m31100| 2015-07-09T14:16:57.906-0400 I INDEX [conn68] build index on: db69.coll69 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db69.coll69" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.907-0400 m31100| 2015-07-09T14:16:57.906-0400 I INDEX [conn68] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.915-0400 m31100| 2015-07-09T14:16:57.915-0400 I INDEX [conn68] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.916-0400 m30999| 2015-07-09T14:16:57.916-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db69.coll69", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.921-0400 m30999| 2015-07-09T14:16:57.920-0400 I SHARDING [conn1] distributed lock 'db69.coll69/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba99ca4787b9985d1ecc [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.923-0400 m30999| 2015-07-09T14:16:57.922-0400 I SHARDING [conn1] enable sharding on: db69.coll69 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.924-0400 m30999| 2015-07-09T14:16:57.922-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:57.922-0400-559eba99ca4787b9985d1ecd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465817922), what: "shardCollection.start", ns: "db69.coll69", details: { shardKey: { _id: "hashed" }, collection: "db69.coll69", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.930-0400 m31101| 2015-07-09T14:16:57.929-0400 I INDEX [repl writer worker 2] build index on: db69.coll69 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db69.coll69" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.930-0400 m31101| 2015-07-09T14:16:57.930-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.933-0400 m31102| 2015-07-09T14:16:57.932-0400 I INDEX [repl writer worker 3] build index on: db69.coll69 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db69.coll69" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.933-0400 m31102| 2015-07-09T14:16:57.932-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.938-0400 m31102| 2015-07-09T14:16:57.938-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.938-0400 m31101| 2015-07-09T14:16:57.938-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:57.976-0400 m30999| 2015-07-09T14:16:57.975-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db69.coll69 using new epoch 559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.083-0400 m30999| 2015-07-09T14:16:58.083-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db69.coll69: 0ms sequenceNumber: 297 version: 1|1||559eba99ca4787b9985d1ece based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.176-0400 m30999| 2015-07-09T14:16:58.138-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db69.coll69: 0ms sequenceNumber: 298 version: 1|1||559eba99ca4787b9985d1ece based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.176-0400 m31100| 2015-07-09T14:16:58.140-0400 I SHARDING [conn191] remotely refreshing metadata for db69.coll69 with requested shard version 1|1||559eba99ca4787b9985d1ece, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.177-0400 m31100| 2015-07-09T14:16:58.141-0400 I SHARDING [conn191] collection db69.coll69 was previously unsharded, new metadata loaded with shard version 1|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.177-0400 m31100| 2015-07-09T14:16:58.141-0400 I SHARDING [conn191] collection version was loaded at version 1|1||559eba99ca4787b9985d1ece, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.177-0400 m30999| 2015-07-09T14:16:58.142-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:58.142-0400-559eba9aca4787b9985d1ecf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465818142), what: "shardCollection", ns: "db69.coll69", details: { version: "1|1||559eba99ca4787b9985d1ece" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.196-0400 m30999| 2015-07-09T14:16:58.196-0400 I SHARDING [conn1] distributed lock 'db69.coll69/bs-osx108-8:30999:1436464534:16807' unlocked. 
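The db69 setup above (Placing [db69] on test-rs0, the _id_hashed index build, and the shardcollection command) is what the stock shell helpers produce; a sketch, using nothing beyond what the log itself shows:

// Run against a mongos (e.g. the conn1 client):
sh.enableSharding("db69");                            // "Enabling sharding for database [db69] in config db"
sh.shardCollection("db69.coll69", { _id: "hashed" }); // "CMD: shardcollection: { shardcollection: "db69.coll69", key: { _id: "hashed" } }"
// With a hashed shard key the server pre-creates chunks up front -- "going to
// create 2 chunk(s) for: db69.coll69" in the entries above.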
[js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.198-0400 m30999| 2015-07-09T14:16:58.197-0400 I SHARDING [conn1] moving chunk ns: db69.coll69 moving ( ns: db69.coll69, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.198-0400 m31100| 2015-07-09T14:16:58.198-0400 I SHARDING [conn187] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.199-0400 m31100| 2015-07-09T14:16:58.198-0400 I SHARDING [conn187] received moveChunk request: { moveChunk: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba99ca4787b9985d1ece') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.203-0400 m31100| 2015-07-09T14:16:58.202-0400 I SHARDING [conn187] distributed lock 'db69.coll69/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba9a792e00bb67274ab2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.203-0400 m31100| 2015-07-09T14:16:58.203-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:58.203-0400-559eba9a792e00bb67274ab3", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", time: new Date(1436465818203), what: "moveChunk.start", ns: "db69.coll69", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.258-0400 m31100| 2015-07-09T14:16:58.257-0400 I SHARDING [conn187] remotely refreshing metadata for db69.coll69 based on current shard version 1|1||559eba99ca4787b9985d1ece, current metadata version is 1|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.259-0400 m31100| 2015-07-09T14:16:58.259-0400 I SHARDING [conn187] metadata of collection db69.coll69 already up to date (shard version : 1|1||559eba99ca4787b9985d1ece, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.260-0400 m31100| 2015-07-09T14:16:58.259-0400 I SHARDING [conn187] moveChunk request accepted at version 1|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.260-0400 m31100| 2015-07-09T14:16:58.260-0400 I SHARDING [conn187] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.260-0400 m31200| 2015-07-09T14:16:58.260-0400 I SHARDING [conn16] remotely refreshing metadata for db69.coll69, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.262-0400 m31200| 2015-07-09T14:16:58.262-0400 I SHARDING [conn16] collection db69.coll69 was previously unsharded, new metadata loaded with shard version 0|0||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.262-0400 m31200| 2015-07-09T14:16:58.262-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559eba99ca4787b9985d1ece, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.263-0400 m31200| 2015-07-09T14:16:58.262-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db69.coll69 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.265-0400 m31100| 2015-07-09T14:16:58.264-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.268-0400 m31100| 2015-07-09T14:16:58.267-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.273-0400 m31100| 2015-07-09T14:16:58.272-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.281-0400 m31200| 2015-07-09T14:16:58.281-0400 I INDEX [migrateThread] build index on: db69.coll69 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db69.coll69" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.282-0400 m31200| 2015-07-09T14:16:58.281-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.283-0400 m31100| 2015-07-09T14:16:58.282-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.291-0400 m31200| 2015-07-09T14:16:58.289-0400 I INDEX [migrateThread] build index on: db69.coll69 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db69.coll69" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.291-0400 m31200| 2015-07-09T14:16:58.290-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.300-0400 m31100| 2015-07-09T14:16:58.300-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.303-0400 m31200| 2015-07-09T14:16:58.303-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.304-0400 m31200| 2015-07-09T14:16:58.304-0400 I SHARDING [migrateThread] Deleter starting delete for: db69.coll69 from { _id: 0 } -> { _id: MaxKey }, with opId: 95737 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.306-0400 m31200| 2015-07-09T14:16:58.305-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db69.coll69 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.313-0400 m31202| 2015-07-09T14:16:58.312-0400 I INDEX [repl writer worker 13] build index on: db69.coll69 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db69.coll69" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.313-0400 m31201| 2015-07-09T14:16:58.312-0400 I INDEX [repl writer worker 1] build index on: db69.coll69 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db69.coll69" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.314-0400 m31202| 2015-07-09T14:16:58.313-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.314-0400 m31201| 2015-07-09T14:16:58.313-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.320-0400 m31202| 2015-07-09T14:16:58.320-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.324-0400 m31200| 2015-07-09T14:16:58.323-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.324-0400 m31200| 2015-07-09T14:16:58.323-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db69.coll69' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.325-0400 m31201| 2015-07-09T14:16:58.325-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.334-0400 m31100| 2015-07-09T14:16:58.333-0400 I SHARDING [conn187] moveChunk data transfer progress: { active: true, ns: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.334-0400 m31100| 2015-07-09T14:16:58.334-0400 I SHARDING [conn187] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.335-0400 m31100| 2015-07-09T14:16:58.334-0400 I SHARDING [conn187] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.335-0400 m31100| 2015-07-09T14:16:58.334-0400 I SHARDING [conn187] moveChunk setting version to: 2|0||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.336-0400 m31200| 2015-07-09T14:16:58.336-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db69.coll69' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.337-0400 m31200| 2015-07-09T14:16:58.336-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:58.336-0400-559eba9ad5a107a5b9c0db6d", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465818336), what: "moveChunk.to", ns: "db69.coll69", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 41, step 2 of 5: 17, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.391-0400 m31100| 2015-07-09T14:16:58.390-0400 I SHARDING [conn187] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.391-0400 m31100| 2015-07-09T14:16:58.390-0400 I SHARDING [conn187] moveChunk updating self version to: 2|1||559eba99ca4787b9985d1ece through { _id: MinKey } -> { _id: 0 } for collection 'db69.coll69' [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.392-0400 m31100| 2015-07-09T14:16:58.392-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:58.392-0400-559eba9a792e00bb67274ab4", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", time: new Date(1436465818392), what: "moveChunk.commit", ns: "db69.coll69", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.445-0400 m31100| 2015-07-09T14:16:58.444-0400 I SHARDING [conn187] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.445-0400 m31100| 2015-07-09T14:16:58.445-0400 I SHARDING [conn187] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.445-0400 m31100| 2015-07-09T14:16:58.445-0400 I SHARDING [conn187] Deleter starting delete for: db69.coll69 from { _id: 0 } -> { _id: MaxKey }, with opId: 229645 [js_test:fsm_all_sharded_replication] 
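The migration above walks the standard moveChunk protocol: the recipient clones (state "ready"), catches up to "steady", the donor enters its critical section and commits, and the rangeDeleter then cleans up inline (waitForDelete: true in the request). A sketch of the equivalent manual commands against a mongos; the split points are simply the splitKeys shown in the log (roughly the +/-2^62 midpoints of the hashed-key space):

// Move the { _id: 0 } -> MaxKey chunk from test-rs0 to test-rs1:
sh.moveChunk("db69.coll69", { _id: 0 }, "test-rs1");
// Then bisect each shard's remaining range, as the two splitChunk
// requests that follow do:
db.adminCommand({ split: "db69.coll69", middle: { _id: NumberLong("-4611686018427387902") } });
db.adminCommand({ split: "db69.coll69", middle: { _id: NumberLong("4611686018427387902") } });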
2015-07-09T14:16:58.445-0400 m31100| 2015-07-09T14:16:58.445-0400 I SHARDING [conn187] rangeDeleter deleted 0 documents for db69.coll69 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.445-0400 m31100| 2015-07-09T14:16:58.445-0400 I SHARDING [conn187] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.446-0400 m31100| 2015-07-09T14:16:58.446-0400 I SHARDING [conn187] distributed lock 'db69.coll69/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.446-0400 m31100| 2015-07-09T14:16:58.446-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:58.446-0400-559eba9a792e00bb67274ab5", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", time: new Date(1436465818446), what: "moveChunk.from", ns: "db69.coll69", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 60, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 111, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.501-0400 m31100| 2015-07-09T14:16:58.500-0400 I COMMAND [conn187] command db69.coll69 command: moveChunk { moveChunk: "db69.coll69", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559eba99ca4787b9985d1ece') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 301ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.502-0400 m30999| 2015-07-09T14:16:58.502-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db69.coll69: 0ms sequenceNumber: 299 version: 2|1||559eba99ca4787b9985d1ece based on: 1|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.504-0400 m31100| 2015-07-09T14:16:58.503-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db69.coll69", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba99ca4787b9985d1ece') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.508-0400 m31100| 2015-07-09T14:16:58.507-0400 I SHARDING [conn187] distributed lock 'db69.coll69/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559eba9a792e00bb67274ab6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.508-0400 m31100| 2015-07-09T14:16:58.507-0400 I SHARDING [conn187] remotely refreshing metadata for db69.coll69 based on current shard version 2|0||559eba99ca4787b9985d1ece, current metadata version is 2|0||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.509-0400 m31100| 2015-07-09T14:16:58.509-0400 I SHARDING [conn187] updating metadata for db69.coll69 from shard version 2|0||559eba99ca4787b9985d1ece to shard version 2|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.509-0400 m31100| 
2015-07-09T14:16:58.509-0400 I SHARDING [conn187] collection version was loaded at version 2|1||559eba99ca4787b9985d1ece, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.510-0400 m31100| 2015-07-09T14:16:58.509-0400 I SHARDING [conn187] splitChunk accepted at version 2|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.511-0400 m31100| 2015-07-09T14:16:58.510-0400 I SHARDING [conn187] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:58.510-0400-559eba9a792e00bb67274ab7", server: "bs-osx108-8", clientAddr: "127.0.0.1:63761", time: new Date(1436465818510), what: "split", ns: "db69.coll69", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559eba99ca4787b9985d1ece') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559eba99ca4787b9985d1ece') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.564-0400 m31100| 2015-07-09T14:16:58.564-0400 I SHARDING [conn187] distributed lock 'db69.coll69/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.566-0400 m30999| 2015-07-09T14:16:58.566-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db69.coll69: 0ms sequenceNumber: 300 version: 2|3||559eba99ca4787b9985d1ece based on: 2|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.567-0400 m31200| 2015-07-09T14:16:58.566-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db69.coll69", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559eba99ca4787b9985d1ece') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.571-0400 m31200| 2015-07-09T14:16:58.570-0400 I SHARDING [conn18] distributed lock 'db69.coll69/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559eba9ad5a107a5b9c0db6e [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.571-0400 m31200| 2015-07-09T14:16:58.570-0400 I SHARDING [conn18] remotely refreshing metadata for db69.coll69 based on current shard version 0|0||559eba99ca4787b9985d1ece, current metadata version is 1|1||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.572-0400 m31200| 2015-07-09T14:16:58.572-0400 I SHARDING [conn18] updating metadata for db69.coll69 from shard version 0|0||559eba99ca4787b9985d1ece to shard version 2|0||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.573-0400 m31200| 2015-07-09T14:16:58.572-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559eba99ca4787b9985d1ece, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.573-0400 m31200| 2015-07-09T14:16:58.572-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.575-0400 m31200| 2015-07-09T14:16:58.574-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:16:58.574-0400-559eba9ad5a107a5b9c0db6f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465818574), what: "split", ns: "db69.coll69", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: 
Timestamp 2000|4, lastmodEpoch: ObjectId('559eba99ca4787b9985d1ece') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559eba99ca4787b9985d1ece') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.629-0400 m31200| 2015-07-09T14:16:58.629-0400 I SHARDING [conn18] distributed lock 'db69.coll69/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.631-0400 m30999| 2015-07-09T14:16:58.631-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db69.coll69: 0ms sequenceNumber: 301 version: 2|5||559eba99ca4787b9985d1ece based on: 2|3||559eba99ca4787b9985d1ece [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.871-0400 m31100| 2015-07-09T14:16:58.870-0400 I COMMAND [conn68] command db69.$cmd command: insert { insert: "coll69", documents: 497, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eba99ca4787b9985d1ece') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 505, w: 505 } }, Database: { acquireCount: { w: 505 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 497 } }, oplog: { acquireCount: { w: 497 } } } protocol:op_command 156ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:58.879-0400 m31200| 2015-07-09T14:16:58.878-0400 I COMMAND [conn72] command db69.$cmd command: insert { insert: "coll69", documents: 503, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eba99ca4787b9985d1ece') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 511, w: 511 } }, Database: { acquireCount: { w: 511 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 503 } }, oplog: { acquireCount: { w: 503 } } } protocol:op_command 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.048-0400 m31200| 2015-07-09T14:16:59.047-0400 I COMMAND [conn72] command db69.$cmd command: insert { insert: "coll69", documents: 482, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559eba99ca4787b9985d1ece') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 490, w: 490 } }, Database: { acquireCount: { w: 490 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 482 } }, oplog: { acquireCount: { w: 482 } } } protocol:op_command 157ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.059-0400 m31100| 2015-07-09T14:16:59.059-0400 I COMMAND [conn68] command db69.$cmd command: insert { insert: "coll69", documents: 518, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559eba99ca4787b9985d1ece') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 526, w: 526 } }, Database: { acquireCount: { w: 526 } }, Collection: { acquireCount: { w: 8 } }, Metadata: { acquireCount: { w: 518 } }, oplog: { acquireCount: { w: 518 } } } protocol:op_command 169ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.061-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.123-0400 m30999| 2015-07-09T14:16:59.120-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64025 
#450 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.125-0400 m30998| 2015-07-09T14:16:59.125-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64026 #449 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.132-0400 m30998| 2015-07-09T14:16:59.132-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64027 #450 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.134-0400 m30998| 2015-07-09T14:16:59.133-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64028 #451 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.144-0400 m30999| 2015-07-09T14:16:59.143-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64029 #451 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.153-0400 setting random seed: 2815870479680 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.154-0400 setting random seed: 1539953728206 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.154-0400 setting random seed: 6335174026899 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.154-0400 setting random seed: 5269919671118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.154-0400 setting random seed: 1071149869821 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.158-0400 m30998| 2015-07-09T14:16:59.158-0400 I SHARDING [conn451] distributed lock 'map_reduce_merge_nonatomic2/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba9b0bd550bed3408b32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.158-0400 m30999| 2015-07-09T14:16:59.158-0400 I SHARDING [conn450] distributed lock 'map_reduce_merge_nonatomic1/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba9bca4787b9985d1ed0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.159-0400 m30998| 2015-07-09T14:16:59.159-0400 I SHARDING [conn449] distributed lock 'map_reduce_merge_nonatomic4/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba9b0bd550bed3408b33 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.160-0400 m30999| 2015-07-09T14:16:59.159-0400 I SHARDING [conn451] distributed lock 'map_reduce_merge_nonatomic3/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559eba9bca4787b9985d1ed1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.160-0400 m30998| 2015-07-09T14:16:59.160-0400 I SHARDING [conn450] distributed lock 'map_reduce_merge_nonatomic0/bs-osx108-8:30998:1436464535:16807' acquired, ts : 559eba9b0bd550bed3408b34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.163-0400 m30998| 2015-07-09T14:16:59.162-0400 I SHARDING [conn451] Placing [map_reduce_merge_nonatomic2] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.164-0400 m30999| 2015-07-09T14:16:59.163-0400 I SHARDING [conn450] Placing [map_reduce_merge_nonatomic1] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.165-0400 m30999| 2015-07-09T14:16:59.164-0400 I SHARDING [conn451] Placing [map_reduce_merge_nonatomic3] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.165-0400 m30998| 2015-07-09T14:16:59.164-0400 I SHARDING [conn449] Placing [map_reduce_merge_nonatomic4] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.165-0400 m30998| 2015-07-09T14:16:59.165-0400 I SHARDING [conn450] Placing [map_reduce_merge_nonatomic0] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.223-0400 m30998| 2015-07-09T14:16:59.223-0400 I 
SHARDING [conn451] distributed lock 'map_reduce_merge_nonatomic2/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.228-0400 m30998| 2015-07-09T14:16:59.227-0400 I SHARDING [conn450] distributed lock 'map_reduce_merge_nonatomic0/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.229-0400 m30998| 2015-07-09T14:16:59.228-0400 I SHARDING [conn449] distributed lock 'map_reduce_merge_nonatomic4/bs-osx108-8:30998:1436464535:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.229-0400 m30999| 2015-07-09T14:16:59.228-0400 I SHARDING [conn450] distributed lock 'map_reduce_merge_nonatomic1/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.230-0400 m30999| 2015-07-09T14:16:59.229-0400 I SHARDING [conn451] distributed lock 'map_reduce_merge_nonatomic3/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.270-0400 m30998| 2015-07-09T14:16:59.270-0400 I SHARDING [conn451] ChunkManager: time to load chunks for db69.coll69: 0ms sequenceNumber: 85 version: 2|5||559eba99ca4787b9985d1ece based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.300-0400 m31100| 2015-07-09T14:16:59.299-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_356 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.305-0400 m31200| 2015-07-09T14:16:59.305-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.312-0400 m31100| 2015-07-09T14:16:59.312-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_357 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.329-0400 m31200| 2015-07-09T14:16:59.329-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.332-0400 m31200| 2015-07-09T14:16:59.330-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_227 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.360-0400 m31200| 2015-07-09T14:16:59.360-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.368-0400 m31200| 2015-07-09T14:16:59.367-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.369-0400 m31100| 2015-07-09T14:16:59.369-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_360 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.371-0400 m31100| 2015-07-09T14:16:59.370-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_359 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.372-0400 m31100| 2015-07-09T14:16:59.372-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_358 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.915-0400 m31200| 2015-07-09T14:16:59.915-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465819_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.921-0400 m31200| 2015-07-09T14:16:59.921-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.922-0400 m31200| 2015-07-09T14:16:59.921-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.922-0400 m31200| 2015-07-09T14:16:59.922-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_226 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.927-0400 m31200| 2015-07-09T14:16:59.926-0400 I COMMAND 
[conn37] command db69.tmp.mrs.coll69_1436465819_119 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.927-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.927-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.927-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.927-0400 m31200| values...., out: "tmp.mrs.coll69_1436465819_119", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:213 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 739 } }, Database: { acquireCount: { r: 27, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 2, w: 7, R: 10, W: 7 }, timeAcquiringMicros: { r: 30655, w: 77259, R: 62504, W: 63574 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 637ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.930-0400 m31200| 2015-07-09T14:16:59.929-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.936-0400 m31200| 2015-07-09T14:16:59.936-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.936-0400 m31200| 2015-07-09T14:16:59.936-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.937-0400 m31200| 2015-07-09T14:16:59.937-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_225 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.938-0400 m31200| 2015-07-09T14:16:59.938-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465819_118 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.938-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.938-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.938-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.939-0400 m31200| values...., out: "tmp.mrs.coll69_1436465819_118", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:213 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 6490, W: 1119 } }, Database: { acquireCount: { r: 27, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 4, w: 12, R: 8, W: 3 }, timeAcquiringMicros: { r: 2805, w: 95538, R: 155171, W: 1587 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 663ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.940-0400 m31200| 2015-07-09T14:16:59.939-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465819_103 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.947-0400 m31200| 2015-07-09T14:16:59.947-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_227 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.947-0400 m31200| 2015-07-09T14:16:59.947-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_227 [js_test:fsm_all_sharded_replication] 
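The db69.tmp.mrs.coll69_* churn above is the first, per-shard pass of a sharded mapReduce ("shardedFirstPass: true"): each shard writes its partial output to a db69.tmp.mrs.coll69_<timestamp>_<n> collection, and a second pass merges those into the per-thread output database (map_reduce_merge_nonatomic0..4, created and placed on test-rs0 above). A sketch of the driving call; the map/reduce bodies beyond the truncation points and the output collection name are assumptions:

db.getSiblingDB("db69").runCommand({
    mapreduce: "coll69",
    map: function mapper() {
        // log shows: if (this.hasOwnProperty('key') && this.has...
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, this.value);     // assumption beyond the truncation point
        }
    },
    reduce: function reducer(key, values) {
        var res = {};                       // remainder truncated in the log ("values....")
        return res;
    },
    out: { merge: "coll69_out",             // assumed name; the log only shows the tmp collections
           nonAtomic: true,
           db: "map_reduce_merge_nonatomic2" }
});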
2015-07-09T14:16:59.948-0400 m31200| 2015-07-09T14:16:59.948-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_227 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.950-0400 m31200| 2015-07-09T14:16:59.949-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465819_103 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.950-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.950-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.950-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.951-0400 m31200| values...., out: "tmp.mrs.coll69_1436465819_103", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:213 locks:{ Global: { acquireCount: { r: 169, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 13244, w: 514, W: 313 } }, Database: { acquireCount: { r: 27, w: 66, R: 19, W: 11 }, acquireWaitCount: { r: 6, w: 9, R: 9, W: 7 }, timeAcquiringMicros: { r: 8502, w: 34739, R: 70803, W: 116699 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 657ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.952-0400 m31200| 2015-07-09T14:16:59.952-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465819_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.960-0400 m31200| 2015-07-09T14:16:59.959-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.960-0400 m31200| 2015-07-09T14:16:59.960-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.964-0400 m31200| 2015-07-09T14:16:59.963-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_229 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.966-0400 m31200| 2015-07-09T14:16:59.966-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465819_104 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.975-0400 m31200| 2015-07-09T14:16:59.975-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.975-0400 m31200| 2015-07-09T14:16:59.975-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.977-0400 m31200| 2015-07-09T14:16:59.976-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_228 [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.977-0400 m31200| 2015-07-09T14:16:59.977-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465819_120 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.977-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.977-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.977-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.978-0400 m31200| values...., out: "tmp.mrs.coll69_1436465819_120", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:213 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 3 }, 
timeAcquiringMicros: { r: 21153 } }, Database: { acquireCount: { r: 27, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 10, w: 9, R: 6, W: 9 }, timeAcquiringMicros: { r: 51881, w: 32324, R: 9147, W: 144927 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 654ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.987-0400 m31200| 2015-07-09T14:16:59.986-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465819_104 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.987-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.987-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.987-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:16:59.987-0400 m31200| values...., out: "tmp.mrs.coll69_1436465819_104", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:213 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { w: 5 }, timeAcquiringMicros: { w: 31114 } }, Database: { acquireCount: { r: 27, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 6, w: 13, R: 7, W: 5 }, timeAcquiringMicros: { r: 22285, w: 96805, R: 14521, W: 100749 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 679ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.152-0400 m31100| 2015-07-09T14:17:00.152-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.159-0400 m31100| 2015-07-09T14:17:00.159-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_356 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.159-0400 m31100| 2015-07-09T14:17:00.159-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_356 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.161-0400 m31100| 2015-07-09T14:17:00.161-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_356 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.167-0400 m31100| 2015-07-09T14:17:00.166-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465819_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.175-0400 m31100| 2015-07-09T14:17:00.175-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_357 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.176-0400 m31100| 2015-07-09T14:17:00.175-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_357 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.177-0400 m31100| 2015-07-09T14:17:00.177-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465819_118 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.177-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.177-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.178-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.178-0400 m31100| values...., out: "tmp.mrs.coll69_1436465819_118", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:213 locks:{ Global: { 
acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 497 } }, Database: { acquireCount: { r: 27, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 1, w: 20, R: 7, W: 7 }, timeAcquiringMicros: { r: 69, w: 254526, R: 165871, W: 15717 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 904ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.179-0400 m31100| 2015-07-09T14:17:00.179-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_357 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.184-0400 m31100| 2015-07-09T14:17:00.184-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465819_104 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.191-0400 m31100| 2015-07-09T14:17:00.191-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_359 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.191-0400 m31100| 2015-07-09T14:17:00.191-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_359 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.195-0400 m31100| 2015-07-09T14:17:00.195-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465819_119 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.195-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.195-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.196-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.196-0400 m31100| values...., out: "tmp.mrs.coll69_1436465819_119", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 227, w: 7471, W: 98 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 5, w: 8, R: 14, W: 9 }, timeAcquiringMicros: { r: 38456, w: 35962, R: 97762, W: 111708 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 918ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.199-0400 m31100| 2015-07-09T14:17:00.199-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_359 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.204-0400 m31100| 2015-07-09T14:17:00.204-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465819_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.210-0400 m31100| 2015-07-09T14:17:00.210-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_360 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.212-0400 m31100| 2015-07-09T14:17:00.210-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_360 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.212-0400 m31100| 2015-07-09T14:17:00.211-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_360 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.212-0400 m31100| 2015-07-09T14:17:00.211-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_361 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.219-0400 m31100| 2015-07-09T14:17:00.219-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465819_104 command: mapReduce { mapreduce: "coll69", map: function mapper() { 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.220-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.220-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.220-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.221-0400 m31100| values...., out: "tmp.mrs.coll69_1436465819_104", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:213 locks:{ Global: { acquireCount: { r: 187, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 16810, W: 226 } }, Database: { acquireCount: { r: 27, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 8, w: 8, R: 12, W: 8 }, timeAcquiringMicros: { r: 89407, w: 97952, R: 21243, W: 56793 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 913ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.229-0400 m31100| 2015-07-09T14:17:00.229-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465819_103 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.234-0400 m31100| 2015-07-09T14:17:00.233-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_358 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.234-0400 m31100| 2015-07-09T14:17:00.234-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_358 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.236-0400 m31100| 2015-07-09T14:17:00.235-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465819_120 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.236-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.237-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.237-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.237-0400 m31100| values...., out: "tmp.mrs.coll69_1436465819_120", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:213 locks:{ Global: { acquireCount: { r: 183, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 23321, w: 92, W: 2533 } }, Database: { acquireCount: { r: 27, w: 66, R: 26, W: 11 }, acquireWaitCount: { r: 10, w: 12, R: 11, W: 7 }, timeAcquiringMicros: { r: 39400, w: 100178, R: 11088, W: 94968 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 914ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.238-0400 m31100| 2015-07-09T14:17:00.236-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_362 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.265-0400 m31100| 2015-07-09T14:17:00.264-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_358 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.275-0400 m31100| 2015-07-09T14:17:00.275-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64030 #195 (117 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.281-0400 m31100| 2015-07-09T14:17:00.281-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.coll69 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:00.283-0400 m31100| 2015-07-09T14:17:00.283-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_363 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.285-0400 m31200| 2015-07-09T14:17:00.284-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64031 #155 (98 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.287-0400 m31100| 2015-07-09T14:17:00.286-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465819_103 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.287-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.287-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.287-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.288-0400 m31100| values...., out: "tmp.mrs.coll69_1436465819_103", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 21548, w: 7522, W: 7720 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 7, w: 12, R: 12, W: 6 }, timeAcquiringMicros: { r: 29245, w: 44826, R: 14562, W: 154571 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1002ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.290-0400 m31100| 2015-07-09T14:17:00.290-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_361 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.291-0400 m31100| 2015-07-09T14:17:00.290-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_361 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.291-0400 m31100| 2015-07-09T14:17:00.290-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_361 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.294-0400 m31100| 2015-07-09T14:17:00.293-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_364 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.306-0400 m31100| 2015-07-09T14:17:00.303-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.306-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.306-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.306-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.307-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.307-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.310-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465819_118", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465819_118", timeMillis: 886, counts: { 
input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|40, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465819_118", timeMillis: 661, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465819000|230, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.311-0400 m31100| 2015-07-09T14:17:00.305-0400 I COMMAND [conn39] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.313-0400 m31101| 2015-07-09T14:17:00.313-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic2.coll69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.316-0400 m31102| 2015-07-09T14:17:00.315-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic2.coll69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.328-0400 m31100| 2015-07-09T14:17:00.327-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_365 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.331-0400 m31100| 2015-07-09T14:17:00.331-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64032 #196 (118 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.333-0400 m31200| 2015-07-09T14:17:00.333-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64033 #156 (99 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.334-0400 m31200| 2015-07-09T14:17:00.334-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.336-0400 m31202| 2015-07-09T14:17:00.335-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.338-0400 m31201| 2015-07-09T14:17:00.338-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.340-0400 m31101| 2015-07-09T14:17:00.339-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.341-0400 m31200| 2015-07-09T14:17:00.341-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_230 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.342-0400 m31102| 2015-07-09T14:17:00.342-0400 I COMMAND [repl writer worker 10] CMD: drop db69.tmp.mrs.coll69_1436465819_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.345-0400 m31100| 2015-07-09T14:17:00.345-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_366 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.347-0400 m31100| 
2015-07-09T14:17:00.347-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.coll69 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.362-0400 m31100| 2015-07-09T14:17:00.361-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_362 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.363-0400 m31100| 2015-07-09T14:17:00.362-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_362 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.363-0400 m31100| 2015-07-09T14:17:00.362-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_362 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.364-0400 m31100| 2015-07-09T14:17:00.363-0400 I COMMAND [conn177] command map_reduce_merge_nonatomic0.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.364-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.365-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.366-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.366-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.366-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.369-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465819_119", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465819_119", timeMillis: 899, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|63, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465819_119", timeMillis: 632, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465819000|216, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 7078, W: 4695 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 165ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.369-0400 m31100| 2015-07-09T14:17:00.364-0400 I COMMAND [conn39] CMD: drop db69.tmp.mrs.coll69_1436465819_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.370-0400 m31200| 2015-07-09T14:17:00.368-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465819_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.410-0400 m31200| 2015-07-09T14:17:00.409-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_231 
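The records above trace the two phases of a sharded mapReduce in this workload: each shard first runs the job with shardedFirstPass: true into a per-shard temp collection (db69.tmp.mrs.coll69_*), then one shard's primary executes mapreduce.shardedfinish, merging the per-shard results into the output database non-atomically before the temp collections are dropped. A minimal shell sketch of the kind of call that produces these entries follows; the emit payload and reducer body are assumptions reconstructed from the truncated mapper/reducer fragments logged above (the actual workload lives under jstests/concurrency/fsm_workloads), while db69, coll69, and the out-spec are taken directly from the log:

    // Hypothetical reconstruction -- only the names and the out-spec are
    // confirmed by the log records above; the emit shape is assumed.
    var mapper = function mapper() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, { count: 1 });  // assumed emit payload
        }
    };
    var reducer = function reducer(key, values) {
        var res = { count: 0 };
        values.forEach(function(v) { res.count += v.count; });  // assumed aggregation
        return res;
    };
    var finalizer = function finalizer(key, reducedValue) {
        return reducedValue;  // identity finalizer, as logged
    };
    // Issued against mongos: each shard logs the shardedFirstPass mapReduce,
    // then the merging shard logs mapreduce.shardedfinish with nonAtomic: true.
    db.getSiblingDB('db69').coll69.mapReduce(mapper, reducer, {
        finalize: finalizer,
        out: { merge: 'coll69', db: 'map_reduce_merge_nonatomic0', nonAtomic: true }
    });

Because the merge is non-atomic, concurrent readers can observe the output collection mid-merge, which is precisely the interleaving this concurrency workload exercises.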
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.422-0400 m31102| 2015-07-09T14:17:00.420-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic0.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.424-0400 m31100| 2015-07-09T14:17:00.423-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_367
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.429-0400 m31101| 2015-07-09T14:17:00.428-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic0.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.433-0400 m31102| 2015-07-09T14:17:00.433-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465819_119
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.442-0400 m31101| 2015-07-09T14:17:00.442-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465819_119
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.443-0400 m31100| 2015-07-09T14:17:00.442-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.456-0400 m31100| 2015-07-09T14:17:00.455-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_363
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.457-0400 m31100| 2015-07-09T14:17:00.456-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.470-0400 m31100| 2015-07-09T14:17:00.470-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_364
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.471-0400 m31100| 2015-07-09T14:17:00.470-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_364
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.471-0400 m31100| 2015-07-09T14:17:00.470-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_364
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.472-0400 m31100| 2015-07-09T14:17:00.471-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.472-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.472-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.472-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.473-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.473-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.477-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465819_120", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465819_120", timeMillis: 888, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|96, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465819_120", timeMillis: 637, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465819000|259, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 7104, w: 15785, W: 38333 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 228ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.477-0400 m31102| 2015-07-09T14:17:00.471-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic3.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.478-0400 m31100| 2015-07-09T14:17:00.471-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_363
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.478-0400 m31100| 2015-07-09T14:17:00.471-0400 I COMMAND [conn39] CMD: drop db69.tmp.mrs.coll69_1436465819_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.478-0400 m31100| 2015-07-09T14:17:00.471-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_363
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.486-0400 m31202| 2015-07-09T14:17:00.484-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465819_119
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.486-0400 m31201| 2015-07-09T14:17:00.485-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465819_119
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.487-0400 m31101| 2015-07-09T14:17:00.485-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic3.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.492-0400 m31100| 2015-07-09T14:17:00.491-0400 I COMMAND [conn179] command map_reduce_merge_nonatomic3.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.492-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.493-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.493-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.493-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.493-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.494-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic3", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465819_104", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465819_104", timeMillis: 885, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|87, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465819_104", timeMillis: 668, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465819000|270, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 19382, w: 23967, W: 14343 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 269ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.495-0400 m31100| 2015-07-09T14:17:00.493-0400 I COMMAND [conn187] CMD: drop db69.tmp.mrs.coll69_1436465819_104
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.497-0400 m31102| 2015-07-09T14:17:00.496-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic4.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.509-0400 m31200| 2015-07-09T14:17:00.509-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465819_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.512-0400 m31101| 2015-07-09T14:17:00.512-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic4.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.518-0400 m31100| 2015-07-09T14:17:00.517-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.518-0400 m31200| 2015-07-09T14:17:00.517-0400 I COMMAND [conn18] CMD: drop db69.tmp.mrs.coll69_1436465819_104
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.518-0400 m31102| 2015-07-09T14:17:00.517-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465819_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.519-0400 m31202| 2015-07-09T14:17:00.519-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465819_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.519-0400 m31201| 2015-07-09T14:17:00.519-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465819_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.524-0400 m31201| 2015-07-09T14:17:00.523-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465819_104
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.525-0400 m31202| 2015-07-09T14:17:00.525-0400 I COMMAND [repl writer worker 10] CMD: drop db69.tmp.mrs.coll69_1436465819_104
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.532-0400 m31100| 2015-07-09T14:17:00.531-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_365
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.533-0400 m31100| 2015-07-09T14:17:00.531-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_365
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.534-0400 m31100| 2015-07-09T14:17:00.531-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_365
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.535-0400 m31200| 2015-07-09T14:17:00.534-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_233
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.535-0400 m31102| 2015-07-09T14:17:00.534-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465819_104
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.535-0400 m31200| 2015-07-09T14:17:00.535-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_232
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.539-0400 m31101| 2015-07-09T14:17:00.538-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465819_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.548-0400 m31101| 2015-07-09T14:17:00.546-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465819_104
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.548-0400 m31101| 2015-07-09T14:17:00.547-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic1.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.552-0400 m31102| 2015-07-09T14:17:00.551-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic1.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.553-0400 m31100| 2015-07-09T14:17:00.552-0400 I COMMAND [conn191] command map_reduce_merge_nonatomic1.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.553-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.553-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.554-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.554-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.554-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.555-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic1", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465819_103", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465819_103", timeMillis: 949, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|106, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465819_103", timeMillis: 654, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465819000|241, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 58, w: 50, W: 2 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 48950, W: 35623 } }, Database: { acquireCount: { r: 3, w: 45, W: 6 } }, Collection: { acquireCount: { r: 3, w: 23 } }, Metadata: { acquireCount: { w: 23 } }, oplog: { acquireCount: { w: 23 } } } protocol:op_command 264ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.555-0400 m31100| 2015-07-09T14:17:00.553-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_368
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.555-0400 m31100| 2015-07-09T14:17:00.553-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_369
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.555-0400 m31100| 2015-07-09T14:17:00.553-0400 I COMMAND [conn187] CMD: drop db69.tmp.mrs.coll69_1436465819_103
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.584-0400 m31200| 2015-07-09T14:17:00.584-0400 I COMMAND [conn18] CMD: drop db69.tmp.mrs.coll69_1436465819_103
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.589-0400 m31101| 2015-07-09T14:17:00.589-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465819_103
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.596-0400 m31102| 2015-07-09T14:17:00.595-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465819_103
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.601-0400 m31202| 2015-07-09T14:17:00.600-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465819_103
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.602-0400 m31201| 2015-07-09T14:17:00.601-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465819_103
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.650-0400 m31200| 2015-07-09T14:17:00.649-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_234
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.659-0400 m31100| 2015-07-09T14:17:00.658-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_370
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.867-0400 m31200| 2015-07-09T14:17:00.867-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465820_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.871-0400 m31200| 2015-07-09T14:17:00.871-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_230
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.872-0400 m31200| 2015-07-09T14:17:00.872-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_230
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.884-0400 m31200| 2015-07-09T14:17:00.884-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_230
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.888-0400 m31200| 2015-07-09T14:17:00.887-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465820_121 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.888-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.888-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.889-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.889-0400 m31200| values...., out: "tmp.mrs.coll69_1436465820_121", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 882 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 28, R: 5, W: 5 }, timeAcquiringMicros: { r: 10248, w: 298091, R: 19932, W: 12833 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 547ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.891-0400 m31200| 2015-07-09T14:17:00.890-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465820_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.895-0400 m31200| 2015-07-09T14:17:00.895-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_231
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.896-0400 m31200| 2015-07-09T14:17:00.896-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_231
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.898-0400 m31200| 2015-07-09T14:17:00.897-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_231
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.900-0400 m31200| 2015-07-09T14:17:00.899-0400 I COMMAND [conn37] command db69.tmp.mrs.coll69_1436465820_122 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.900-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.900-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.900-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.901-0400 m31200| values...., out: "tmp.mrs.coll69_1436465820_122", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5688, W: 251 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 24, R: 5, W: 7 }, timeAcquiringMicros: { r: 1068, w: 188141, R: 66979, W: 24942 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 491ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.908-0400 m31200| 2015-07-09T14:17:00.908-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465820_105
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.916-0400 m31200| 2015-07-09T14:17:00.916-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_232
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.916-0400 m31200| 2015-07-09T14:17:00.916-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_232
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.918-0400 m31200| 2015-07-09T14:17:00.918-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_232
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.924-0400 m31200| 2015-07-09T14:17:00.923-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465820_105 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.924-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.924-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.924-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.925-0400 m31200| values...., out: "tmp.mrs.coll69_1436465820_105", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 6069, w: 5967, W: 29 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 5, w: 14, R: 12, W: 9 }, timeAcquiringMicros: { r: 21171, w: 53842, R: 46922, W: 31087 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 390ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.926-0400 m31200| 2015-07-09T14:17:00.924-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465820_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.933-0400 m31200| 2015-07-09T14:17:00.933-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_233
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.933-0400 m31200| 2015-07-09T14:17:00.933-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_233
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.935-0400 m31200| 2015-07-09T14:17:00.935-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_233
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.935-0400 m31200| 2015-07-09T14:17:00.935-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465820_123 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.935-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.936-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.936-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.936-0400 m31200| values...., out: "tmp.mrs.coll69_1436465820_123", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 7144, w: 11115, W: 85 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 19, R: 10, W: 7 }, timeAcquiringMicros: { r: 864, w: 44108, R: 50025, W: 54870 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 401ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.939-0400 m31200| 2015-07-09T14:17:00.937-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465820_106
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.943-0400 m31200| 2015-07-09T14:17:00.943-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_234
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.943-0400 m31200| 2015-07-09T14:17:00.943-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_234
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.944-0400 m31200| 2015-07-09T14:17:00.944-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_234
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.944-0400 m31200| 2015-07-09T14:17:00.944-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465820_106 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.944-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.944-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.944-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:00.945-0400 m31200| values...., out: "tmp.mrs.coll69_1436465820_106", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 15572, w: 5650 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 4, w: 13, R: 11, W: 5 }, timeAcquiringMicros: { r: 10380, w: 30304, R: 50207, W: 55163 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 342ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.179-0400 m31100| 2015-07-09T14:17:01.179-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465820_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.184-0400 m31100| 2015-07-09T14:17:01.183-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_366
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.184-0400 m31100| 2015-07-09T14:17:01.183-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_366
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.185-0400 m31100| 2015-07-09T14:17:01.185-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_366
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.189-0400 m31100| 2015-07-09T14:17:01.188-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465820_121 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.189-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.189-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.189-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.190-0400 m31100| values...., out: "tmp.mrs.coll69_1436465820_121", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 1, W: 1 }, timeAcquiringMicros: { r: 44047, w: 19064, W: 256 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 26, R: 9, W: 5 }, timeAcquiringMicros: { w: 364614, R: 162503, W: 4668 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 846ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.190-0400 m31100| 2015-07-09T14:17:01.189-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_371
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.206-0400 m31100| 2015-07-09T14:17:01.206-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465820_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.210-0400 m31100| 2015-07-09T14:17:01.209-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_367
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.210-0400 m31100| 2015-07-09T14:17:01.210-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_367
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.212-0400 m31100| 2015-07-09T14:17:01.211-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_367
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.215-0400 m31100| 2015-07-09T14:17:01.215-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465820_122 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.216-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.216-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.216-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.218-0400 m31100| values...., out: "tmp.mrs.coll69_1436465820_122", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 6, W: 1 }, timeAcquiringMicros: { r: 355, w: 64398, W: 12092 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 5, w: 18, R: 15, W: 4 }, timeAcquiringMicros: { r: 28460, w: 144884, R: 147058, W: 22094 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 807ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.218-0400 m31100| 2015-07-09T14:17:01.217-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_372
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.240-0400 m31100| 2015-07-09T14:17:01.239-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465820_105
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.246-0400 m31100| 2015-07-09T14:17:01.245-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_369
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.248-0400 m31100| 2015-07-09T14:17:01.246-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_369
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.248-0400 m31100| 2015-07-09T14:17:01.247-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_369
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.249-0400 m31100| 2015-07-09T14:17:01.248-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465820_105 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.249-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.249-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.249-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.249-0400 m31100| values...., out: "tmp.mrs.coll69_1436465820_105", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:213 locks:{ Global: { acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 14451, w: 6314, W: 20142 } }, Database: { acquireCount: { r: 26, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 4, w: 14, R: 21, W: 9 }, timeAcquiringMicros: { r: 2575, w: 71051, R: 75611, W: 72077 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 714ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.250-0400 m31100| 2015-07-09T14:17:01.250-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_373
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.263-0400 m31100| 2015-07-09T14:17:01.263-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465820_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.276-0400 m31100| 2015-07-09T14:17:01.275-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_368
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.276-0400 m31100| 2015-07-09T14:17:01.276-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_368
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.276-0400 m31100| 2015-07-09T14:17:01.276-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465820_106
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.283-0400 m31100| 2015-07-09T14:17:01.283-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_370
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.283-0400 m31100| 2015-07-09T14:17:01.283-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_370
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.286-0400 m31100| 2015-07-09T14:17:01.285-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_370
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.291-0400 m31100| 2015-07-09T14:17:01.291-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_368
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.291-0400 m31100| 2015-07-09T14:17:01.291-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465820_123 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.292-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.292-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.292-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.294-0400 m31100| values...., out: "tmp.mrs.coll69_1436465820_123", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:213 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 3, W: 1 }, timeAcquiringMicros: { r: 23857, w: 25994, W: 4307 } }, Database: { acquireCount: { r: 26, w: 66, R: 22, W: 11 }, acquireWaitCount: { r: 5, w: 16, R: 22, W: 7 }, timeAcquiringMicros: { r: 32151, w: 39562, R: 90559, W: 75681 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 757ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.295-0400 m31100| 2015-07-09T14:17:01.292-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465820_106 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.295-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.295-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.295-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.296-0400 m31100| values...., out: "tmp.mrs.coll69_1436465820_106", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:213 locks:{ Global: { acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 32236, w: 10527, W: 20169 } }, Database: { acquireCount: { r: 26, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 5, w: 12, R: 21, W: 7 }, timeAcquiringMicros: { r: 43899, w: 81605, R: 30120, W: 67734 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 690ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.297-0400 m31100| 2015-07-09T14:17:01.293-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_374
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.297-0400 m31100| 2015-07-09T14:17:01.297-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_375
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.326-0400 m31100| 2015-07-09T14:17:01.326-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_372
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.328-0400 m31100| 2015-07-09T14:17:01.328-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_371
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.336-0400 m31100| 2015-07-09T14:17:01.335-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_373
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.338-0400 m31100| 2015-07-09T14:17:01.337-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_372
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.339-0400 m31100| 2015-07-09T14:17:01.338-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_372
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.339-0400 m31100| 2015-07-09T14:17:01.338-0400 I COMMAND [conn177] command map_reduce_merge_nonatomic0.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.340-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.340-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.340-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.342-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.342-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.344-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465820_122", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465820_122", timeMillis: 802, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465821000|58, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465820_122", timeMillis: 488, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|60, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 22471 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 122ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.345-0400 m31100| 2015-07-09T14:17:01.339-0400 I COMMAND [conn39] CMD: drop db69.tmp.mrs.coll69_1436465820_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.345-0400 m31100| 2015-07-09T14:17:01.339-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_371
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.346-0400 m31100| 2015-07-09T14:17:01.339-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_371
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.346-0400 m31100| 2015-07-09T14:17:01.340-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.346-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.346-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.347-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.347-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.347-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.348-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465820_121", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465820_121", timeMillis: 842, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465821000|30, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465820_121", timeMillis: 531, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|40, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 51199 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 151ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.348-0400 m31100| 2015-07-09T14:17:01.343-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465820_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.354-0400 m31200| 2015-07-09T14:17:01.353-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465820_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.355-0400 m31100| 2015-07-09T14:17:01.355-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_373
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.355-0400 m31100| 2015-07-09T14:17:01.355-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_373
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.357-0400 m31201| 2015-07-09T14:17:01.356-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465820_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.358-0400 m31202| 2015-07-09T14:17:01.357-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465820_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.362-0400 m31100| 2015-07-09T14:17:01.360-0400 I COMMAND [conn179] command map_reduce_merge_nonatomic3.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.363-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.363-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.363-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.363-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.363-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.366-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic3", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465820_105", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465820_105", timeMillis: 712, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465821000|80, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465820_105", timeMillis: 383, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|87, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.366-0400 m31200| 2015-07-09T14:17:01.361-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_235
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.366-0400 m31100| 2015-07-09T14:17:01.362-0400 I COMMAND [conn187] CMD: drop db69.tmp.mrs.coll69_1436465820_105
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.369-0400 m31200| 2015-07-09T14:17:01.364-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465820_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.372-0400 m31200| 2015-07-09T14:17:01.371-0400 I COMMAND [conn18] CMD: drop
db69.tmp.mrs.coll69_1436465820_105 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.372-0400 m31100| 2015-07-09T14:17:01.371-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_376 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.374-0400 m31102| 2015-07-09T14:17:01.374-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_372 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.380-0400 m31102| 2015-07-09T14:17:01.379-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_371 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.386-0400 m31202| 2015-07-09T14:17:01.385-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465820_121 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.386-0400 m31201| 2015-07-09T14:17:01.386-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465820_121 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.389-0400 m31100| 2015-07-09T14:17:01.389-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_374 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.390-0400 m31102| 2015-07-09T14:17:01.390-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_373 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.392-0400 m31200| 2015-07-09T14:17:01.391-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_236 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.395-0400 m31100| 2015-07-09T14:17:01.393-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_374 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.396-0400 m31100| 2015-07-09T14:17:01.393-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_374 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.396-0400 m31100| 2015-07-09T14:17:01.394-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.396-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.396-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.396-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.396-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.397-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.398-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465820_123", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465820_123", timeMillis: 742, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465821000|122, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465820_123", timeMillis: 399, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|103, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { 
test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 101ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.398-0400 m31100| 2015-07-09T14:17:01.395-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465820_123 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.399-0400 m31102| 2015-07-09T14:17:01.395-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465820_122 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.399-0400 m31100| 2015-07-09T14:17:01.396-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_377 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.399-0400 m31100| 2015-07-09T14:17:01.396-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_375 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.403-0400 m31101| 2015-07-09T14:17:01.403-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_372 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.406-0400 m31102| 2015-07-09T14:17:01.405-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465820_121 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.408-0400 m31101| 2015-07-09T14:17:01.408-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_371 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.410-0400 m31201| 2015-07-09T14:17:01.410-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465820_105 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.415-0400 m31202| 2015-07-09T14:17:01.415-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465820_105 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.416-0400 m31100| 2015-07-09T14:17:01.416-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_375 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.416-0400 m31100| 2015-07-09T14:17:01.416-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_375 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.417-0400 m31100| 2015-07-09T14:17:01.416-0400 I COMMAND [conn191] command map_reduce_merge_nonatomic1.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.418-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.418-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.418-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.418-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.419-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.421-0400 m31100| }, out: { merge: 
"coll69", db: "map_reduce_merge_nonatomic1", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465820_106", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465820_106", timeMillis: 682, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465821000|123, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465820_106", timeMillis: 341, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465820000|115, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 120ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.422-0400 m31102| 2015-07-09T14:17:01.417-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465820_105 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.422-0400 m31101| 2015-07-09T14:17:01.419-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_373 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.423-0400 m31100| 2015-07-09T14:17:01.420-0400 I COMMAND [conn187] CMD: drop db69.tmp.mrs.coll69_1436465820_106 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.424-0400 m31200| 2015-07-09T14:17:01.422-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465820_123 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.428-0400 m31200| 2015-07-09T14:17:01.427-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_237 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.428-0400 m31101| 2015-07-09T14:17:01.428-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465820_122 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.428-0400 m31100| 2015-07-09T14:17:01.428-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_378 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.430-0400 m31200| 2015-07-09T14:17:01.429-0400 I COMMAND [conn18] CMD: drop db69.tmp.mrs.coll69_1436465820_106 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.446-0400 m31201| 2015-07-09T14:17:01.440-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465820_123 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.447-0400 m31202| 2015-07-09T14:17:01.441-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465820_123 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.447-0400 m31202| 2015-07-09T14:17:01.443-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465820_106 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.447-0400 m31201| 2015-07-09T14:17:01.443-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465820_106 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.447-0400 m31100| 2015-07-09T14:17:01.446-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_380
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.448-0400 m31200| 2015-07-09T14:17:01.447-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_239
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.449-0400 m31102| 2015-07-09T14:17:01.449-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_374
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.451-0400 m31101| 2015-07-09T14:17:01.449-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465820_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.451-0400 m31100| 2015-07-09T14:17:01.450-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_379
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.451-0400 m31102| 2015-07-09T14:17:01.451-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_375
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.454-0400 m31101| 2015-07-09T14:17:01.454-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465820_105
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.455-0400 m31200| 2015-07-09T14:17:01.455-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_238
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.464-0400 m31101| 2015-07-09T14:17:01.464-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_374
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.467-0400 m31101| 2015-07-09T14:17:01.466-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_375
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.474-0400 m31102| 2015-07-09T14:17:01.474-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465820_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.497-0400 m31101| 2015-07-09T14:17:01.497-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465820_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.553-0400 m31102| 2015-07-09T14:17:01.553-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465820_106
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:01.554-0400 m31101| 2015-07-09T14:17:01.553-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465820_106
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.003-0400 m31200| 2015-07-09T14:17:02.002-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.007-0400 m31200| 2015-07-09T14:17:02.007-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_235
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.007-0400 m31200| 2015-07-09T14:17:02.007-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_235
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.009-0400 m31200| 2015-07-09T14:17:02.009-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_235
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.012-0400 m31200| 2015-07-09T14:17:02.012-0400 I COMMAND [conn37] command db69.tmp.mrs.coll69_1436465821_124 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.012-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.013-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.013-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.013-0400 m31200| values...., out: "tmp.mrs.coll69_1436465821_124", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:213 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 63 } }, Database: { acquireCount: { r: 26, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 1, w: 16, R: 11, W: 6 }, timeAcquiringMicros: { r: 128, w: 150592, R: 141108, W: 3761 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 651ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.018-0400 m31200| 2015-07-09T14:17:02.018-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465821_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.027-0400 m31200| 2015-07-09T14:17:02.027-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_236
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.028-0400 m31200| 2015-07-09T14:17:02.028-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_236
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.029-0400 m31200| 2015-07-09T14:17:02.029-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_236
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.034-0400 m31200| 2015-07-09T14:17:02.031-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465821_125 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.034-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.034-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.035-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.035-0400 m31200| values...., out: "tmp.mrs.coll69_1436465821_125", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:213 locks:{ Global: { acquireCount: { r: 169, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 4824, W: 141 } }, Database: { acquireCount: { r: 26, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 6, w: 12, R: 11, W: 9 }, timeAcquiringMicros: { r: 6706, w: 51003, R: 103369, W: 57938 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 646ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.043-0400 m31200| 2015-07-09T14:17:02.042-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465821_107
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.054-0400 m31200| 2015-07-09T14:17:02.054-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_237
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.055-0400 m31200| 2015-07-09T14:17:02.054-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_237
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.056-0400 m31200| 2015-07-09T14:17:02.056-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_237
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.057-0400 m31200| 2015-07-09T14:17:02.056-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465821_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.063-0400 m31200| 2015-07-09T14:17:02.062-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_238
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.063-0400 m31200| 2015-07-09T14:17:02.062-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_238
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.064-0400 m31200| 2015-07-09T14:17:02.062-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465821_107 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.064-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.064-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.064-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.064-0400 m31200| values...., out: "tmp.mrs.coll69_1436465821_107", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:213 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 15135, w: 5571, W: 486 } }, Database: { acquireCount: { r: 26, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 9, w: 9, R: 14, W: 7 }, timeAcquiringMicros: { r: 29659, w: 55082, R: 66527, W: 58200 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 666ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.065-0400 m31200| 2015-07-09T14:17:02.063-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_238
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.065-0400 m31200| 2015-07-09T14:17:02.064-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465821_126 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.065-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.065-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.066-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.066-0400 m31200| values...., out: "tmp.mrs.coll69_1436465821_126", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:213 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 22350, w: 4871, W: 1498 } }, Database: { acquireCount: { r: 26, w: 66, R: 22, W: 11 }, acquireWaitCount: { r: 4, w: 14, R: 11, W: 8 }, timeAcquiringMicros: { r: 9642, w: 95062, R: 28908, W: 85192 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 626ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.066-0400 m31200| 2015-07-09T14:17:02.066-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465821_108
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.073-0400 m31200| 2015-07-09T14:17:02.073-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_239
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.074-0400 m31200| 2015-07-09T14:17:02.074-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_239
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.075-0400 m31200| 2015-07-09T14:17:02.074-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_239
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.075-0400 m31200| 2015-07-09T14:17:02.075-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465821_108 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.075-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.075-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.076-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.076-0400 m31200| values...., out: "tmp.mrs.coll69_1436465821_108", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:213 locks:{ Global: { acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 20273, w: 13759 } }, Database: { acquireCount: { r: 26, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 5, w: 16, R: 12, W: 5 }, timeAcquiringMicros: { r: 6216, w: 21025, R: 44486, W: 128763 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 632ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.303-0400 m31100| 2015-07-09T14:17:02.302-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.310-0400 m31100| 2015-07-09T14:17:02.310-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_376
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.310-0400 m31100| 2015-07-09T14:17:02.310-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_376
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.313-0400 m31100| 2015-07-09T14:17:02.313-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_376
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.315-0400 m31100| 2015-07-09T14:17:02.315-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465821_124 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.316-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.316-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.317-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.317-0400 m31100| values...., out: "tmp.mrs.coll69_1436465821_124", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:213 locks:{ Global: { acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 308 } }, Database: { acquireCount: { r: 26, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 2, w: 22, R: 10, W: 5 }, timeAcquiringMicros: { r: 14890, w: 336857, R: 120634, W: 3946 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 952ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.318-0400 m31100| 2015-07-09T14:17:02.318-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_381
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.319-0400 m31100| 2015-07-09T14:17:02.319-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465821_108
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.327-0400 m31100| 2015-07-09T14:17:02.326-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_380
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.327-0400 m31100| 2015-07-09T14:17:02.327-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_380
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.342-0400 m31100| 2015-07-09T14:17:02.341-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_380
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.344-0400 m31100| 2015-07-09T14:17:02.344-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465821_108 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.344-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.344-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.345-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.345-0400 m31100| values...., out: "tmp.mrs.coll69_1436465821_108", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 8178, W: 702 } }, Database: { acquireCount: { r: 26, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 5, w: 10, R: 14, W: 7 }, timeAcquiringMicros: { r: 1378, w: 48149, R: 56365, W: 93373 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 901ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.346-0400 m31100| 2015-07-09T14:17:02.344-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465821_107
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.346-0400 m31100| 2015-07-09T14:17:02.345-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_382
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.347-0400 m31100| 2015-07-09T14:17:02.347-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_378
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.348-0400 m31100| 2015-07-09T14:17:02.347-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_378
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.358-0400 m31100| 2015-07-09T14:17:02.358-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_378
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.369-0400 m31100| 2015-07-09T14:17:02.368-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465821_107 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.370-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.370-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.370-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.371-0400 m31100| values...., out: "tmp.mrs.coll69_1436465821_107", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 16953, W: 20 } }, Database: { acquireCount: { r: 26, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 7, w: 10, R: 14, W: 9 }, timeAcquiringMicros: { r: 61459, w: 85674, R: 42217, W: 56170 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 972ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.372-0400 m31100| 2015-07-09T14:17:02.371-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_383
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.389-0400 m31100| 2015-07-09T14:17:02.388-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465821_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.400-0400 m31100| 2015-07-09T14:17:02.399-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_377
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.400-0400 m31100| 2015-07-09T14:17:02.399-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_377
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.401-0400 m31100| 2015-07-09T14:17:02.401-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_377
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.405-0400 m31100| 2015-07-09T14:17:02.402-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465821_125 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.405-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.405-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.405-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.406-0400 m31100| values...., out: "tmp.mrs.coll69_1436465821_125", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:19 reslen:213 locks:{ Global: { acquireCount: { r: 191, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 20786, W: 16104 } }, Database: { acquireCount: { r: 26, w: 66, R: 31, W: 11 }, acquireWaitCount: { r: 9, w: 11, R: 16, W: 7 }, timeAcquiringMicros: { r: 33418, w: 41468, R: 95588, W: 102859 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1016ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.406-0400 m31100| 2015-07-09T14:17:02.406-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_384
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.425-0400 m31100| 2015-07-09T14:17:02.425-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465821_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.430-0400 m31100| 2015-07-09T14:17:02.430-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_379
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.431-0400 m31100| 2015-07-09T14:17:02.430-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_379
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.433-0400 m31100| 2015-07-09T14:17:02.433-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_381
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.434-0400 m31100| 2015-07-09T14:17:02.434-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_379
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.434-0400 m31100| 2015-07-09T14:17:02.434-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465821_126 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.435-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.435-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.435-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.435-0400 m31100| values...., out: "tmp.mrs.coll69_1436465821_126", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:19 reslen:213 locks:{ Global: { acquireCount: { r: 191, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 27734, w: 20388, W: 14800 } }, Database: { acquireCount: { r: 26, w: 66, R: 31, W: 11 }, acquireWaitCount: { r: 4, w: 14, R: 18, W: 6 }, timeAcquiringMicros: { r: 12488, w: 113297, R: 35235, W: 80755 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 996ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.436-0400 m31100| 2015-07-09T14:17:02.436-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_385
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.437-0400 m31100| 2015-07-09T14:17:02.437-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_381
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.438-0400 m31100| 2015-07-09T14:17:02.437-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_381
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.438-0400 m31100| 2015-07-09T14:17:02.437-0400 I COMMAND [conn177] command map_reduce_merge_nonatomic0.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.438-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.438-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.439-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.439-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.439-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.440-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465821_124", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465821_124", timeMillis: 947, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|42, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465821_124", timeMillis: 646, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|9, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 20975, w: 39583 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.440-0400 m31100| 2015-07-09T14:17:02.438-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.445-0400 m31200| 2015-07-09T14:17:02.444-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.449-0400 m31202| 2015-07-09T14:17:02.448-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.449-0400 m31201| 2015-07-09T14:17:02.449-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.450-0400 m31200| 2015-07-09T14:17:02.450-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_240
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.452-0400 m31100| 2015-07-09T14:17:02.451-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_386
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.468-0400 m31102| 2015-07-09T14:17:02.468-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_381
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.476-0400 m31101| 2015-07-09T14:17:02.476-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_381
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.480-0400 m31102| 2015-07-09T14:17:02.480-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.484-0400 m31101| 2015-07-09T14:17:02.483-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465821_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.486-0400 m31100| 2015-07-09T14:17:02.486-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_384
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.487-0400 m31100| 2015-07-09T14:17:02.487-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_383
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.488-0400 m31100| 2015-07-09T14:17:02.488-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_382
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.513-0400 m31100| 2015-07-09T14:17:02.512-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_384
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.513-0400 m31100| 2015-07-09T14:17:02.512-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_384
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.514-0400 m31100| 2015-07-09T14:17:02.512-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.515-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.515-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.515-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.515-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.516-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.518-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465821_125", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465821_125", timeMillis: 1013, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|106, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465821_125", timeMillis: 642, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|23, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 107ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.518-0400 m31100| 2015-07-09T14:17:02.513-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465821_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.518-0400 m31100| 2015-07-09T14:17:02.515-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_383
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.518-0400 m31100| 2015-07-09T14:17:02.515-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_383
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.519-0400 m31102| 2015-07-09T14:17:02.515-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_384
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.519-0400 m31100| 2015-07-09T14:17:02.515-0400 I COMMAND [conn179] command map_reduce_merge_nonatomic3.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.519-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.519-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.519-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.520-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.520-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.521-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic3", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465821_107", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465821_107", timeMillis: 952, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|75, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465821_107", timeMillis: 658, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|69, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 18937 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 145ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.521-0400 m31100| 2015-07-09T14:17:02.516-0400 I COMMAND [conn187] CMD: drop db69.tmp.mrs.coll69_1436465821_107 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.521-0400 m31100| 2015-07-09T14:17:02.516-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_382 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.522-0400 m31100| 2015-07-09T14:17:02.518-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_382 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.522-0400 m31100| 2015-07-09T14:17:02.518-0400 I COMMAND [conn191] command map_reduce_merge_nonatomic1.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.522-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.522-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.522-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.522-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.522-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.525-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic1", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465821_108", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465821_108", timeMillis: 884, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|69, electionId: 
ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465821_108", timeMillis: 631, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|78, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 3 }, timeAcquiringMicros: { w: 50316 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 173ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.525-0400 m31100| 2015-07-09T14:17:02.519-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465821_108 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.526-0400 m31200| 2015-07-09T14:17:02.526-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465821_125 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.530-0400 m31102| 2015-07-09T14:17:02.529-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_383 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.530-0400 m31201| 2015-07-09T14:17:02.529-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465821_125 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.531-0400 m31202| 2015-07-09T14:17:02.530-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465821_125 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.531-0400 m31200| 2015-07-09T14:17:02.531-0400 I COMMAND [conn18] CMD: drop db69.tmp.mrs.coll69_1436465821_107 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.534-0400 m31101| 2015-07-09T14:17:02.527-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_384 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.534-0400 m31101| 2015-07-09T14:17:02.531-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_383 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.534-0400 m31200| 2015-07-09T14:17:02.534-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465821_108 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.534-0400 m31100| 2015-07-09T14:17:02.534-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_387 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.538-0400 m31102| 2015-07-09T14:17:02.537-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_382 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.539-0400 m31101| 2015-07-09T14:17:02.539-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_382 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.541-0400 m31201| 2015-07-09T14:17:02.541-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465821_107 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.543-0400 m31202| 2015-07-09T14:17:02.542-0400 I COMMAND [repl writer worker 11] CMD: 
drop db69.tmp.mrs.coll69_1436465821_107 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.547-0400 m31100| 2015-07-09T14:17:02.547-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_388 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.552-0400 m31200| 2015-07-09T14:17:02.552-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.552-0400 m31200| 2015-07-09T14:17:02.552-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.558-0400 m31200| 2015-07-09T14:17:02.557-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.558-0400 m31100| 2015-07-09T14:17:02.558-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_385 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.562-0400 m31101| 2015-07-09T14:17:02.562-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465821_125 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.567-0400 m31100| 2015-07-09T14:17:02.567-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_385 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.568-0400 m31100| 2015-07-09T14:17:02.567-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_385 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.568-0400 m31100| 2015-07-09T14:17:02.567-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.568-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.568-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.568-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.568-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.568-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.570-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465821_126", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465821_126", timeMillis: 992, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|134, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465821_126", timeMillis: 623, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|71, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, 
w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 131ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.570-0400 m31100| 2015-07-09T14:17:02.568-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465821_126 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.572-0400 m31102| 2015-07-09T14:17:02.569-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465821_125 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.573-0400 m31102| 2015-07-09T14:17:02.573-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465821_107 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.575-0400 m31100| 2015-07-09T14:17:02.575-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_389 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.576-0400 m31101| 2015-07-09T14:17:02.576-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465821_107 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.581-0400 m31102| 2015-07-09T14:17:02.580-0400 I COMMAND [repl writer worker 10] CMD: drop db69.tmp.mrs.coll69_1436465821_108 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.581-0400 m31200| 2015-07-09T14:17:02.581-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465821_126 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.586-0400 m31202| 2015-07-09T14:17:02.585-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465821_108 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.594-0400 m31201| 2015-07-09T14:17:02.594-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465821_108 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.596-0400 m31202| 2015-07-09T14:17:02.596-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465821_126 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.596-0400 m31201| 2015-07-09T14:17:02.596-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465821_126 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.597-0400 m31101| 2015-07-09T14:17:02.596-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465821_108 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.598-0400 m31102| 2015-07-09T14:17:02.598-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_385 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.601-0400 m31102| 2015-07-09T14:17:02.601-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465821_126 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.609-0400 m31101| 2015-07-09T14:17:02.609-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_385 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.611-0400 m31101| 2015-07-09T14:17:02.610-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465821_126 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.637-0400 m31100| 2015-07-09T14:17:02.636-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_390 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.649-0400 m31200| 2015-07-09T14:17:02.648-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.950-0400 m31200| 2015-07-09T14:17:02.948-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465822_127 
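The mapReduce entries above log only the opening fragments of the workload's map and reduce functions ("if (this.hasOwnProperty('key') && this.has..." and "var res = {}; values...."); the bodies are truncated by the logger. A minimal sketch of what such functions could look like follows, reconstructed only from the visible fragments and the logged counts (emit equals input, so the mapper emits once per document; ~20 outputs per shard suggests a small key space). Everything past the truncation points is an assumption, not the actual jstest source:

    // Hypothetical reconstruction -- the log truncates both function bodies.
    function mapper() {
        // Visible in the log: a guard beginning "this.hasOwnProperty('key') && this.has..."
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, { count: 1 }); // emitted value shape is an assumption
        }
    }

    function reducer(key, values) {
        var res = {}; // matches the visible "var res = {};"
        // The aggregation over `values` is elided in the log ("values....");
        // a plausible body sums per-key counts:
        res.count = 0;
        values.forEach(function (v) { res.count += v.count; });
        return res;
    }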
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.954-0400 m31200| 2015-07-09T14:17:02.953-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.954-0400 m31200| 2015-07-09T14:17:02.954-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.956-0400 m31200| 2015-07-09T14:17:02.956-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_240 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.960-0400 m31200| 2015-07-09T14:17:02.959-0400 I COMMAND [conn37] command db69.tmp.mrs.coll69_1436465822_127 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.960-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.960-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.960-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.961-0400 m31200| values...., out: "tmp.mrs.coll69_1436465822_127", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 180 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 22, R: 3, W: 4 }, timeAcquiringMicros: { w: 260160, R: 41674, W: 3689 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 510ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.983-0400 m31200| 2015-07-09T14:17:02.982-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.988-0400 m31200| 2015-07-09T14:17:02.988-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.988-0400 m31200| 2015-07-09T14:17:02.988-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.989-0400 m31200| 2015-07-09T14:17:02.988-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.994-0400 m31200| 2015-07-09T14:17:02.993-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.994-0400 m31200| 2015-07-09T14:17:02.994-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.995-0400 m31200| 2015-07-09T14:17:02.994-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_241 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.997-0400 m31200| 2015-07-09T14:17:02.996-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_243 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.997-0400 m31200| 2015-07-09T14:17:02.996-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465822_128 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.997-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.997-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.997-0400 m31200| 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.998-0400 m31200| values...., out: "tmp.mrs.coll69_1436465822_128", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 89, w: 5881, W: 5750 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 1, w: 10, R: 14, W: 9 }, timeAcquiringMicros: { r: 5994, w: 66344, R: 73526, W: 15559 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 463ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:02.998-0400 m31200| 2015-07-09T14:17:02.997-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.001-0400 m31200| 2015-07-09T14:17:02.999-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.001-0400 m31200| 2015-07-09T14:17:02.999-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.002-0400 m31200| 2015-07-09T14:17:02.999-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465822_110 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.002-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.002-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.003-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.003-0400 m31200| values...., out: "tmp.mrs.coll69_1436465822_110", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 15206, W: 130 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 7, R: 14, W: 6 }, timeAcquiringMicros: { r: 11262, w: 56362, R: 42504, W: 53961 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 443ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.004-0400 m31200| 2015-07-09T14:17:03.000-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_242 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.004-0400 m31200| 2015-07-09T14:17:03.001-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465822_109 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.004-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.005-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.005-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.006-0400 m31200| values...., out: "tmp.mrs.coll69_1436465822_109", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 16943, W: 1165 } }, 
Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 15, R: 11, W: 5 }, timeAcquiringMicros: { r: 9269, w: 50262, R: 83527, W: 38800 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 457ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.008-0400 m31200| 2015-07-09T14:17:03.008-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.014-0400 m31200| 2015-07-09T14:17:03.013-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.015-0400 m31200| 2015-07-09T14:17:03.013-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.015-0400 m31200| 2015-07-09T14:17:03.013-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_244 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.015-0400 m31200| 2015-07-09T14:17:03.014-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465822_129 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.017-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.017-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.017-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.018-0400 m31200| values...., out: "tmp.mrs.coll69_1436465822_129", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 16269 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 6, R: 13, W: 5 }, timeAcquiringMicros: { r: 29225, w: 38444, R: 48578, W: 76464 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 423ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.157-0400 m31100| 2015-07-09T14:17:03.157-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465822_127 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.170-0400 m31100| 2015-07-09T14:17:03.170-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_386 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.172-0400 m31100| 2015-07-09T14:17:03.171-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_386 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.180-0400 m31100| 2015-07-09T14:17:03.179-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_386 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.200-0400 m31100| 2015-07-09T14:17:03.199-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465822_127 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.200-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.200-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.200-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.202-0400 m31100| values...., out: "tmp.mrs.coll69_1436465822_127", shardedFirstPass: true } 
planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 271 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 21, R: 11, W: 4 }, timeAcquiringMicros: { w: 318390, R: 112091, W: 24094 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 747ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.202-0400 m31100| 2015-07-09T14:17:03.200-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_391 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.215-0400 m31100| 2015-07-09T14:17:03.215-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.220-0400 m31100| 2015-07-09T14:17:03.220-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_387 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.222-0400 m31100| 2015-07-09T14:17:03.220-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_387 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.223-0400 m31100| 2015-07-09T14:17:03.221-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_387 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.223-0400 m31100| 2015-07-09T14:17:03.222-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465822_128 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.224-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.224-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.224-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.225-0400 m31100| values...., out: "tmp.mrs.coll69_1436465822_128", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 12923, W: 608 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 3, w: 16, R: 15, W: 6 }, timeAcquiringMicros: { r: 15749, w: 160292, R: 113794, W: 37775 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 689ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.225-0400 m31100| 2015-07-09T14:17:03.225-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_392 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.261-0400 m31100| 2015-07-09T14:17:03.260-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.272-0400 m31100| 2015-07-09T14:17:03.271-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_388 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.273-0400 m31100| 2015-07-09T14:17:03.273-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_388 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.276-0400 m31100| 2015-07-09T14:17:03.275-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_388 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.277-0400 m31100| 2015-07-09T14:17:03.276-0400 I COMMAND [conn49] CMD: 
drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.282-0400 m31100| 2015-07-09T14:17:03.282-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_390 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.283-0400 m31100| 2015-07-09T14:17:03.282-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_390 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.286-0400 m31100| 2015-07-09T14:17:03.286-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_390 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.288-0400 m31100| 2015-07-09T14:17:03.286-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465822_109 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.288-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.288-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.288-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.289-0400 m31100| values...., out: "tmp.mrs.coll69_1436465822_109", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:213 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 13159, W: 255 } }, Database: { acquireCount: { r: 26, w: 66, R: 22, W: 11 }, acquireWaitCount: { w: 8, R: 22, W: 9 }, timeAcquiringMicros: { w: 48071, R: 139585, W: 75377 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 743ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.289-0400 m31100| 2015-07-09T14:17:03.287-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465822_129 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.290-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.290-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.290-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.291-0400 m31100| values...., out: "tmp.mrs.coll69_1436465822_129", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:213 locks:{ Global: { acquireCount: { r: 169, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 11700, w: 5698, W: 1856 } }, Database: { acquireCount: { r: 26, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 2, w: 7, R: 20, W: 9 }, timeAcquiringMicros: { r: 34992, w: 78527, R: 61955, W: 84143 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 696ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.291-0400 m31100| 2015-07-09T14:17:03.290-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_393 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.294-0400 m31100| 2015-07-09T14:17:03.293-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_394 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.327-0400 m31100| 2015-07-09T14:17:03.327-0400 I COMMAND [conn191] CMD: drop 
db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.331-0400 m31100| 2015-07-09T14:17:03.331-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_389 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.332-0400 m31100| 2015-07-09T14:17:03.332-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_389 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.333-0400 m31100| 2015-07-09T14:17:03.333-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_391 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.342-0400 m31100| 2015-07-09T14:17:03.338-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_391 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.343-0400 m31100| 2015-07-09T14:17:03.338-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_391 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.343-0400 m31100| 2015-07-09T14:17:03.339-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_389 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.344-0400 m31100| 2015-07-09T14:17:03.340-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_392 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.345-0400 m31100| 2015-07-09T14:17:03.341-0400 I COMMAND [conn177] command map_reduce_merge_nonatomic0.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.345-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.345-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.345-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.346-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.346-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.348-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465822_127", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465822_127", timeMillis: 718, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|35, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465822_127", timeMillis: 504, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|109, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 52902 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { 
w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.348-0400 m31100| 2015-07-09T14:17:03.342-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465822_127 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.349-0400 m31100| 2015-07-09T14:17:03.342-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465822_110 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.349-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.349-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.350-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.350-0400 m31100| values...., out: "tmp.mrs.coll69_1436465822_110", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:10 reslen:213 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 17751, W: 31274 } }, Database: { acquireCount: { r: 26, w: 66, R: 22, W: 11 }, acquireWaitCount: { r: 4, w: 6, R: 22, W: 6 }, timeAcquiringMicros: { r: 14833, w: 60444, R: 109343, W: 81146 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 786ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.351-0400 m31100| 2015-07-09T14:17:03.342-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_392 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.351-0400 m31100| 2015-07-09T14:17:03.342-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_392 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.351-0400 m31200| 2015-07-09T14:17:03.348-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465822_127 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.353-0400 m31100| 2015-07-09T14:17:03.351-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.353-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.353-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.354-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.354-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.354-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.355-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465822_128", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465822_128", timeMillis: 687, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|43, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465822_128", timeMillis: 461, counts: { input: 
985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|169, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 49134 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 126ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.356-0400 m31201| 2015-07-09T14:17:03.352-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465822_127 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.357-0400 m31202| 2015-07-09T14:17:03.352-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465822_127 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.357-0400 m31100| 2015-07-09T14:17:03.353-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_395 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.358-0400 m31100| 2015-07-09T14:17:03.353-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.358-0400 m31200| 2015-07-09T14:17:03.357-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.372-0400 m31200| 2015-07-09T14:17:03.372-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.378-0400 m31100| 2015-07-09T14:17:03.377-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_396 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.386-0400 m31202| 2015-07-09T14:17:03.385-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.386-0400 m31201| 2015-07-09T14:17:03.385-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.387-0400 m31102| 2015-07-09T14:17:03.387-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_391 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.390-0400 m31101| 2015-07-09T14:17:03.390-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_391 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.397-0400 m31102| 2015-07-09T14:17:03.397-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_392 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.399-0400 m31100| 2015-07-09T14:17:03.399-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_394 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.400-0400 m31200| 2015-07-09T14:17:03.399-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.404-0400 m31102| 2015-07-09T14:17:03.404-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465822_127 
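The entries above show the two phases of a sharded mapReduce as this test exercises it: each shard first runs the job locally with shardedFirstPass: true, writing into a per-invocation shard-result collection (db69.tmp.mrs.coll69_<epoch>_<n>); mongos then drives mapreduce.shardedfinish on the primary shard of the output database, which pulls the per-shard results (the shards/shardCounts arrays in the log) and merges them into the target collection with nonAtomic: true, after which the temporary collections are dropped on both shards and the drops replicate to the secondaries (the repl writer worker lines on m31101/m31102/m31201/m31202). A sketch of the shell invocation consistent with these entries is below; mapper/reducer refer to the hypothetical reconstruction earlier, the finalizer is fully visible in the log, and the choice of output database is illustrative:

    // Hypothetical invocation against mongos, matching the logged command shape.
    db.getSiblingDB('db69').runCommand({
        mapreduce: 'coll69',
        map: mapper,
        reduce: reducer,
        finalize: function finalizer(key, reducedValue) { return reducedValue; },
        // nonAtomic merge into another database, as in the log entries above:
        out: { merge: 'coll69', db: 'map_reduce_merge_nonatomic0', nonAtomic: true }
    });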
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.406-0400 m31100| 2015-07-09T14:17:03.406-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_393 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.408-0400 m31100| 2015-07-09T14:17:03.407-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_397 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.411-0400 m31101| 2015-07-09T14:17:03.410-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_392 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.411-0400 m31102| 2015-07-09T14:17:03.411-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.413-0400 m31101| 2015-07-09T14:17:03.413-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465822_127 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.427-0400 m31100| 2015-07-09T14:17:03.424-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_394 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.428-0400 m31100| 2015-07-09T14:17:03.424-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_393 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.428-0400 m31100| 2015-07-09T14:17:03.425-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_394 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.428-0400 m31100| 2015-07-09T14:17:03.425-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_393 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.428-0400 m31100| 2015-07-09T14:17:03.426-0400 I COMMAND [conn179] command map_reduce_merge_nonatomic3.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.428-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.429-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.429-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.429-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.429-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.430-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic3", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465822_109", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465822_109", timeMillis: 728, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|106, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465822_109", timeMillis: 456, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|172, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { 
emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 1150 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 134ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.431-0400 m31100| 2015-07-09T14:17:03.426-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.431-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.431-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.431-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.431-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.431-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.433-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465822_129", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465822_129", timeMillis: 692, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|113, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465822_129", timeMillis: 422, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|21, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 138ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.433-0400 m31100| 2015-07-09T14:17:03.428-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.433-0400 m31100| 2015-07-09T14:17:03.428-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.434-0400 m31101| 2015-07-09T14:17:03.434-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465822_128 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.440-0400 m31200| 2015-07-09T14:17:03.440-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.451-0400 
m31200| 2015-07-09T14:17:03.449-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.454-0400 m31100| 2015-07-09T14:17:03.453-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_395 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.455-0400 m31202| 2015-07-09T14:17:03.454-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.455-0400 m31201| 2015-07-09T14:17:03.454-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.459-0400 m31102| 2015-07-09T14:17:03.459-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_394 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.459-0400 m31202| 2015-07-09T14:17:03.459-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.462-0400 m31102| 2015-07-09T14:17:03.461-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_393 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.462-0400 m31201| 2015-07-09T14:17:03.462-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.465-0400 m31200| 2015-07-09T14:17:03.465-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.467-0400 m31100| 2015-07-09T14:17:03.466-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_395 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.468-0400 m31100| 2015-07-09T14:17:03.467-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_395 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.469-0400 m31100| 2015-07-09T14:17:03.467-0400 I COMMAND [conn191] command map_reduce_merge_nonatomic1.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.469-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.469-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.470-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.470-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.470-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.472-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic1", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465822_110", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465822_110", timeMillis: 775, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|145, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465822_110", timeMillis: 432, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465822000|168, electionId: 
ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.472-0400 m31100| 2015-07-09T14:17:03.468-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.479-0400 m31101| 2015-07-09T14:17:03.478-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_394 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.481-0400 m31101| 2015-07-09T14:17:03.480-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_393 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.481-0400 m31100| 2015-07-09T14:17:03.481-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_398 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.488-0400 m31200| 2015-07-09T14:17:03.487-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.489-0400 m31100| 2015-07-09T14:17:03.488-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_399 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.489-0400 m31200| 2015-07-09T14:17:03.488-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.502-0400 m31102| 2015-07-09T14:17:03.502-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.508-0400 m31201| 2015-07-09T14:17:03.508-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.511-0400 m31202| 2015-07-09T14:17:03.510-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.517-0400 m31102| 2015-07-09T14:17:03.517-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.520-0400 m31101| 2015-07-09T14:17:03.519-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465822_109 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.524-0400 m31101| 2015-07-09T14:17:03.524-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465822_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.525-0400 m31102| 2015-07-09T14:17:03.525-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_395 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.527-0400 m31102| 2015-07-09T14:17:03.526-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.529-0400 m31200| 2015-07-09T14:17:03.528-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_249 
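Reading the slow-operation lines above: in the locks section, lowercase r/w are intent-shared/intent-exclusive acquisitions and uppercase R/W are shared/exclusive; acquireWaitCount and timeAcquiringMicros show how often and how long the operation blocked. For example, the 747ms first-pass op earlier in this run waited 21 times on the Database lock for about 318 ms (timeAcquiringMicros w: 318390), which accounts for much of its latency. A rough triage helper is sketched below; it is hypothetical, and the regex assumes exactly the formatting seen in this log:

    // Hypothetical helper: total microseconds an op spent waiting on locks,
    // extracted from one slow-op log line of the form seen above.
    function lockWaitMicros(line) {
        var total = 0;
        var m, re = /timeAcquiringMicros: { ([^}]*) }/g;
        while ((m = re.exec(line)) !== null) {
            m[1].split(',').forEach(function (kv) {
                total += parseInt(kv.split(':')[1], 10) || 0;
            });
        }
        return total; // e.g. 318390 + 112091 + 24094 for the 747ms op
    }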
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.537-0400 m31101| 2015-07-09T14:17:03.535-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_395 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.537-0400 m31101| 2015-07-09T14:17:03.537-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465822_110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.545-0400 m31100| 2015-07-09T14:17:03.544-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.847-0400 m31200| 2015-07-09T14:17:03.847-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.850-0400 m31200| 2015-07-09T14:17:03.850-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.850-0400 m31200| 2015-07-09T14:17:03.850-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.852-0400 m31200| 2015-07-09T14:17:03.852-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_245 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.854-0400 m31200| 2015-07-09T14:17:03.853-0400 I COMMAND [conn37] command db69.tmp.mrs.coll69_1436465823_130 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.854-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.854-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.854-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.855-0400 m31200| values...., out: "tmp.mrs.coll69_1436465823_130", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 188 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 21, R: 9, W: 4 }, timeAcquiringMicros: { w: 277291, R: 48855, W: 1332 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 496ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.857-0400 m31200| 2015-07-09T14:17:03.857-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.862-0400 m31200| 2015-07-09T14:17:03.862-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.863-0400 m31200| 2015-07-09T14:17:03.862-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.867-0400 m31200| 2015-07-09T14:17:03.864-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.868-0400 m31200| 2015-07-09T14:17:03.868-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465823_131 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.868-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.869-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:03.869-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.869-0400 m31200| values...., out: "tmp.mrs.coll69_1436465823_131", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 3796, W: 292 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 4, w: 9, R: 13, W: 8 }, timeAcquiringMicros: { r: 15698, w: 58283, R: 100145, W: 19272 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 482ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.878-0400 m31200| 2015-07-09T14:17:03.878-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.886-0400 m31200| 2015-07-09T14:17:03.885-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.886-0400 m31200| 2015-07-09T14:17:03.886-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.890-0400 m31200| 2015-07-09T14:17:03.889-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_247 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.891-0400 m31200| 2015-07-09T14:17:03.890-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465823_111 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.891-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.892-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.892-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.892-0400 m31200| values...., out: "tmp.mrs.coll69_1436465823_111", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 3, W: 1 }, timeAcquiringMicros: { w: 8802, W: 439 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 5, w: 10, R: 13, W: 8 }, timeAcquiringMicros: { r: 5975, w: 38583, R: 85353, W: 66077 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 436ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.893-0400 m31200| 2015-07-09T14:17:03.893-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.898-0400 m31200| 2015-07-09T14:17:03.897-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_249 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.898-0400 m31200| 2015-07-09T14:17:03.897-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_249 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.900-0400 m31200| 2015-07-09T14:17:03.899-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_249 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.901-0400 m31200| 2015-07-09T14:17:03.900-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465823_112 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:03.901-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.901-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.901-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.902-0400 m31200| values...., out: "tmp.mrs.coll69_1436465823_112", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 8523, w: 9796, W: 61 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 6, w: 11, R: 13, W: 9 }, timeAcquiringMicros: { r: 6427, w: 33329, R: 39073, W: 79363 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 394ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.903-0400 m31200| 2015-07-09T14:17:03.902-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.912-0400 m31200| 2015-07-09T14:17:03.911-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.912-0400 m31200| 2015-07-09T14:17:03.912-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.913-0400 m31200| 2015-07-09T14:17:03.913-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_248 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.913-0400 m31200| 2015-07-09T14:17:03.913-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465823_132 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.914-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.914-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.914-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:03.916-0400 m31200| values...., out: "tmp.mrs.coll69_1436465823_132", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2 }, timeAcquiringMicros: { r: 8594, w: 10688 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 6, w: 13, R: 14, W: 5 }, timeAcquiringMicros: { r: 22142, w: 37712, R: 56020, W: 77426 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 446ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.364-0400 m31100| 2015-07-09T14:17:04.363-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.369-0400 m31100| 2015-07-09T14:17:04.367-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_396 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.369-0400 m31100| 2015-07-09T14:17:04.368-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_396 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.370-0400 m31100| 2015-07-09T14:17:04.370-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_396 
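The drop traffic above follows the temp-collection lifecycle of these jobs: each invocation uses incrementally numbered working collections (db69.tmp.mr.coll69_N) plus the per-invocation shard-result collection (db69.tmp.mrs.coll69_<epoch>_<n>), and the same tmp.mr name is dropped several times in a row, which appears to be idempotent cleanup at successive stages of the job rather than an error. If a run is interrupted these temporaries can be left behind; a quick check is sketched below, with the name pattern taken from this log (hypothetical helper, not part of the test):

    // Hypothetical leftover check: list any map/reduce temp collections in db69.
    db.getSiblingDB('db69').getCollectionNames().filter(function (name) {
        return /^tmp\.mrs?\./.test(name); // matches tmp.mr.* and tmp.mrs.*
    });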
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.372-0400 m31100| 2015-07-09T14:17:04.371-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465823_130 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.372-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.372-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.372-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.372-0400 m31100| values...., out: "tmp.mrs.coll69_1436465823_130", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:213 locks:{ Global: { acquireCount: { r: 169, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 564 } }, Database: { acquireCount: { r: 26, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 2, w: 27, R: 12, W: 5 }, timeAcquiringMicros: { r: 16126, w: 417780, R: 146661, W: 1606 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1015ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.376-0400 m31100| 2015-07-09T14:17:04.375-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_401 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.376-0400 m31100| 2015-07-09T14:17:04.376-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.381-0400 m31100| 2015-07-09T14:17:04.381-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_397 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.382-0400 m31100| 2015-07-09T14:17:04.381-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_397 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.399-0400 m31100| 2015-07-09T14:17:04.398-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_397 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.404-0400 m31100| 2015-07-09T14:17:04.404-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465823_131 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.404-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.405-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.405-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.406-0400 m31100| values...., out: "tmp.mrs.coll69_1436465823_131", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4925, W: 516 } }, Database: { acquireCount: { r: 26, w: 66, R: 28, W: 11 }, acquireWaitCount: { r: 3, w: 16, R: 16, W: 9 }, timeAcquiringMicros: { r: 22472, w: 159002, R: 110147, W: 53083 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1018ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.406-0400 m31100| 2015-07-09T14:17:04.405-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_402 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:04.425-0400 m31100| 2015-07-09T14:17:04.425-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.431-0400 m31100| 2015-07-09T14:17:04.431-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_398 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.431-0400 m31100| 2015-07-09T14:17:04.431-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_398 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.433-0400 m31100| 2015-07-09T14:17:04.433-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_398 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.437-0400 m31100| 2015-07-09T14:17:04.437-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465823_111 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.438-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.438-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.438-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.439-0400 m31100| values...., out: "tmp.mrs.coll69_1436465823_111", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:23 reslen:213 locks:{ Global: { acquireCount: { r: 199, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 5601, W: 874 } }, Database: { acquireCount: { r: 26, w: 66, R: 35, W: 11 }, acquireWaitCount: { r: 3, w: 10, R: 24, W: 9 }, timeAcquiringMicros: { r: 68417, w: 57274, R: 79012, W: 35238 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 983ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.439-0400 m31100| 2015-07-09T14:17:04.439-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_403 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.452-0400 m31100| 2015-07-09T14:17:04.452-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.461-0400 m31100| 2015-07-09T14:17:04.461-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_399 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.462-0400 m31100| 2015-07-09T14:17:04.462-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_399 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.464-0400 m31100| 2015-07-09T14:17:04.463-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.471-0400 m31100| 2015-07-09T14:17:04.471-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.472-0400 m31100| 2015-07-09T14:17:04.472-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.474-0400 m31100| 2015-07-09T14:17:04.474-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_401 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.475-0400 m31100| 2015-07-09T14:17:04.474-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.476-0400 m31100| 2015-07-09T14:17:04.474-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_399 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:04.476-0400 m31100| 2015-07-09T14:17:04.475-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_401 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.476-0400 m31100| 2015-07-09T14:17:04.475-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_401 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.479-0400 m31100| 2015-07-09T14:17:04.475-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465823_132 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.479-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.479-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.480-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.481-0400 m31100| values...., out: "tmp.mrs.coll69_1436465823_132", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:25 reslen:213 locks:{ Global: { acquireCount: { r: 203, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 6305, w: 15406, W: 10373 } }, Database: { acquireCount: { r: 26, w: 66, R: 37, W: 11 }, acquireWaitCount: { r: 3, w: 13, R: 27, W: 5 }, timeAcquiringMicros: { r: 14492, w: 50604, R: 46617, W: 67885 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1007ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.481-0400 m31100| 2015-07-09T14:17:04.475-0400 I COMMAND [conn177] command map_reduce_merge_nonatomic0.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.482-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.482-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.482-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.483-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.483-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.485-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465823_130", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465823_130", timeMillis: 1012, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465824000|29, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465823_130", timeMillis: 493, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|67, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, 
input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { r: 2, w: 5 }, timeAcquiringMicros: { r: 14382, w: 25845 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 102ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.486-0400 m31100| 2015-07-09T14:17:04.475-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465823_112 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.486-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.487-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.487-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.487-0400 m31100| values...., out: "tmp.mrs.coll69_1436465823_112", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:26 reslen:213 locks:{ Global: { acquireCount: { r: 205, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 30143, w: 358, W: 1095 } }, Database: { acquireCount: { r: 26, w: 66, R: 38, W: 11 }, acquireWaitCount: { r: 2, w: 7, R: 25, W: 5 }, timeAcquiringMicros: { r: 2628, w: 52865, R: 55485, W: 68736 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 969ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.488-0400 m31100| 2015-07-09T14:17:04.479-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.488-0400 m31100| 2015-07-09T14:17:04.479-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_404 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.488-0400 m31100| 2015-07-09T14:17:04.482-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_405 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.495-0400 m31100| 2015-07-09T14:17:04.495-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_402 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.506-0400 m31102| 2015-07-09T14:17:04.506-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_401 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.508-0400 m31200| 2015-07-09T14:17:04.507-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.511-0400 m31101| 2015-07-09T14:17:04.510-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_401 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.513-0400 m31201| 2015-07-09T14:17:04.511-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.514-0400 m31202| 2015-07-09T14:17:04.514-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.516-0400 m31100| 2015-07-09T14:17:04.515-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_403 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.517-0400 m31102| 2015-07-09T14:17:04.516-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.517-0400 m31200| 2015-07-09T14:17:04.517-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.518-0400 m31101| 2015-07-09T14:17:04.518-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465823_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.520-0400 m31100| 2015-07-09T14:17:04.519-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_406 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.520-0400 m31100| 2015-07-09T14:17:04.520-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_402 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.521-0400 m31100| 2015-07-09T14:17:04.521-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_402 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.522-0400 m31100| 2015-07-09T14:17:04.522-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_403 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.522-0400 m31100| 2015-07-09T14:17:04.522-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_403 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.526-0400 m31100| 2015-07-09T14:17:04.524-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.527-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.527-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.527-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.527-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.528-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.529-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465823_131", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465823_131", timeMillis: 996, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465824000|42, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465823_131", timeMillis: 477, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|73, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 
5 }, timeAcquiringMicros: { w: 33013 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 119ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.529-0400 m31100| 2015-07-09T14:17:04.525-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.529-0400 m31100| 2015-07-09T14:17:04.526-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.533-0400 m31101| 2015-07-09T14:17:04.533-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_402 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.537-0400 m31102| 2015-07-09T14:17:04.536-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_402 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.552-0400 m31200| 2015-07-09T14:17:04.552-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.566-0400 m31200| 2015-07-09T14:17:04.566-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.585-0400 m31202| 2015-07-09T14:17:04.584-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.586-0400 m31200| 2015-07-09T14:17:04.585-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.589-0400 m31201| 2015-07-09T14:17:04.588-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.590-0400 m31100| 2015-07-09T14:17:04.589-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_404 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.590-0400 m30999| 2015-07-09T14:17:04.589-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:04.581-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.590-0400 m31101| 2015-07-09T14:17:04.590-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_403 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.591-0400 m31202| 2015-07-09T14:17:04.591-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.592-0400 m31201| 2015-07-09T14:17:04.592-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.592-0400 m31100| 2015-07-09T14:17:04.591-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_408 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.593-0400 m31100| 2015-07-09T14:17:04.593-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_404 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.593-0400 m31100| 2015-07-09T14:17:04.593-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_404 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.594-0400 m31200| 2015-07-09T14:17:04.594-0400 I COMMAND [conn52] CMD: drop 
db69.tmp.mr.coll69_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.595-0400 m31102| 2015-07-09T14:17:04.594-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_403 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.601-0400 m31102| 2015-07-09T14:17:04.601-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.603-0400 m31101| 2015-07-09T14:17:04.603-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465823_111 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.604-0400 m31100| 2015-07-09T14:17:04.603-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_407 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.605-0400 m31100| 2015-07-09T14:17:04.604-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.606-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.606-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.606-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.606-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.607-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.608-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465823_132", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465823_132", timeMillis: 994, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465824000|137, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465823_132", timeMillis: 444, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|136, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 124ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.609-0400 m31100| 2015-07-09T14:17:04.608-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.613-0400 m31102| 2015-07-09T14:17:04.613-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.619-0400 m31101| 2015-07-09T14:17:04.618-0400 I COMMAND [repl 
writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465823_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.620-0400 m31200| 2015-07-09T14:17:04.620-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.622-0400 m31100| 2015-07-09T14:17:04.622-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_405 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.625-0400 m31100| 2015-07-09T14:17:04.623-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_405 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.626-0400 m31100| 2015-07-09T14:17:04.623-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_405 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.626-0400 m31100| 2015-07-09T14:17:04.624-0400 I COMMAND [conn191] command map_reduce_merge_nonatomic1.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.626-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.626-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.626-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.626-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.626-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.628-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic1", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465823_112", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465823_112", timeMillis: 966, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465824000|139, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465823_112", timeMillis: 391, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465823000|128, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 141ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.628-0400 m31100| 2015-07-09T14:17:04.624-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.628-0400 m31202| 2015-07-09T14:17:04.626-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.628-0400 m31201| 
2015-07-09T14:17:04.627-0400 I COMMAND [repl writer worker 10] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.631-0400 m31200| 2015-07-09T14:17:04.630-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.631-0400 m31200| 2015-07-09T14:17:04.631-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.645-0400 m31100| 2015-07-09T14:17:04.645-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_409 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.654-0400 m31102| 2015-07-09T14:17:04.654-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_404 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.662-0400 m31101| 2015-07-09T14:17:04.661-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_404 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.670-0400 m31102| 2015-07-09T14:17:04.668-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.675-0400 m31100| 2015-07-09T14:17:04.675-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_410 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.677-0400 m31201| 2015-07-09T14:17:04.676-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.678-0400 m31101| 2015-07-09T14:17:04.677-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465823_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.680-0400 m31202| 2015-07-09T14:17:04.680-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.682-0400 m31101| 2015-07-09T14:17:04.681-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_405 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.685-0400 m31102| 2015-07-09T14:17:04.684-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_405 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.685-0400 m31102| 2015-07-09T14:17:04.685-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.687-0400 m31101| 2015-07-09T14:17:04.687-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465823_112 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.710-0400 m31200| 2015-07-09T14:17:04.710-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_254 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.993-0400 m31200| 2015-07-09T14:17:04.993-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.997-0400 m31200| 2015-07-09T14:17:04.996-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:04.997-0400 m31200| 2015-07-09T14:17:04.997-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.002-0400 m31200| 2015-07-09T14:17:05.001-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_250 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.004-0400 m31200| 2015-07-09T14:17:05.004-0400 I COMMAND [conn37] command 
db69.tmp.mrs.coll69_1436465824_133 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.005-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.005-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.005-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.006-0400 m31200| values...., out: "tmp.mrs.coll69_1436465824_133", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 55 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 22, R: 8, W: 5 }, timeAcquiringMicros: { w: 253470, R: 40822, W: 3742 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 487ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.014-0400 m31200| 2015-07-09T14:17:05.014-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.022-0400 m31200| 2015-07-09T14:17:05.022-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.023-0400 m31200| 2015-07-09T14:17:05.022-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.028-0400 m31200| 2015-07-09T14:17:05.026-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_251 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.031-0400 m31200| 2015-07-09T14:17:05.030-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465824_113 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.031-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.031-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.032-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.033-0400 m31200| values...., out: "tmp.mrs.coll69_1436465824_113", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4250, W: 75 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 5, w: 15, R: 14, W: 9 }, timeAcquiringMicros: { r: 14140, w: 56995, R: 98127, W: 29367 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 458ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.039-0400 m31200| 2015-07-09T14:17:05.039-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465824_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.050-0400 m31200| 2015-07-09T14:17:05.050-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.051-0400 m31200| 2015-07-09T14:17:05.050-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.052-0400 m31200| 
2015-07-09T14:17:05.052-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_253 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.053-0400 m31200| 2015-07-09T14:17:05.053-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465824_135 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.054-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.054-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.054-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.055-0400 m31200| values...., out: "tmp.mrs.coll69_1436465824_135", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 8514, w: 4237, W: 734 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 16, R: 12, W: 8 }, timeAcquiringMicros: { r: 2143, w: 69972, R: 55191, W: 59978 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 425ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.058-0400 m31200| 2015-07-09T14:17:05.058-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.064-0400 m31200| 2015-07-09T14:17:05.064-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.064-0400 m31200| 2015-07-09T14:17:05.064-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.068-0400 m31200| 2015-07-09T14:17:05.066-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_252 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.068-0400 m31200| 2015-07-09T14:17:05.068-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.076-0400 m31200| 2015-07-09T14:17:05.076-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_254 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.076-0400 m31200| 2015-07-09T14:17:05.076-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_254 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.076-0400 m31200| 2015-07-09T14:17:05.076-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465824_134 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.076-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.077-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.077-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.077-0400 m31200| values...., out: "tmp.mrs.coll69_1436465824_134", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 8239, w: 25862, W: 104 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 8, w: 17, R: 15, W: 6 }, timeAcquiringMicros: { 
r: 25926, w: 71931, R: 68111, W: 32487 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 488ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.080-0400 m31200| 2015-07-09T14:17:05.078-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_254 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.081-0400 m31200| 2015-07-09T14:17:05.079-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465824_114 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.082-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.082-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.082-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.083-0400 m31200| values...., out: "tmp.mrs.coll69_1436465824_114", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 17112, w: 752, W: 1585 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 4, w: 8, R: 13, W: 5 }, timeAcquiringMicros: { r: 18128, w: 39209, R: 43949, W: 74507 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 406ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.565-0400 m31100| 2015-07-09T14:17:05.565-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.570-0400 m31100| 2015-07-09T14:17:05.569-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_406 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.570-0400 m31100| 2015-07-09T14:17:05.570-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_406 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.571-0400 m31100| 2015-07-09T14:17:05.571-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_406 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.575-0400 m31100| 2015-07-09T14:17:05.574-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465824_133 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.575-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.575-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.576-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.576-0400 m31100| values...., out: "tmp.mrs.coll69_1436465824_133", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:7 reslen:213 locks:{ Global: { acquireCount: { r: 167, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 19, W: 11 }, acquireWaitCount: { w: 26, R: 12, W: 2 }, timeAcquiringMicros: { w: 502949, R: 120211, W: 1054 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1057ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.590-0400 m31100| 2015-07-09T14:17:05.590-0400 I COMMAND 
[conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_411 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.684-0400 m31100| 2015-07-09T14:17:05.684-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_411 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.685-0400 m31100| 2015-07-09T14:17:05.685-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_411 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.686-0400 m31100| 2015-07-09T14:17:05.685-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_411 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.686-0400 m31100| 2015-07-09T14:17:05.686-0400 I COMMAND [conn177] command map_reduce_merge_nonatomic0.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.686-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.686-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.687-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.687-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.687-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.688-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465824_133", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465824_133", timeMillis: 1053, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465825000|21, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465824_133", timeMillis: 480, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465824000|45, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 109ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.688-0400 m31100| 2015-07-09T14:17:05.686-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.689-0400 m31101| 2015-07-09T14:17:05.688-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_411 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.690-0400 m31102| 2015-07-09T14:17:05.690-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_411 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:05.691-0400 m31200| 2015-07-09T14:17:05.690-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.692-0400 m31101| 2015-07-09T14:17:05.692-0400 I COMMAND [repl writer worker 10] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.693-0400 m31102| 2015-07-09T14:17:05.692-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.694-0400 m31202| 2015-07-09T14:17:05.694-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.695-0400 m31201| 2015-07-09T14:17:05.694-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465824_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.695-0400 m31200| 2015-07-09T14:17:05.695-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_255 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.706-0400 m31100| 2015-07-09T14:17:05.705-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_412 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.800-0400 m31100| 2015-07-09T14:17:05.799-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.803-0400 m31100| 2015-07-09T14:17:05.803-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_408 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.804-0400 m31100| 2015-07-09T14:17:05.803-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_408 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.805-0400 m31100| 2015-07-09T14:17:05.804-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_408 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.809-0400 m31100| 2015-07-09T14:17:05.808-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465824_134 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.809-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.809-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.809-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.810-0400 m31100| values...., out: "tmp.mrs.coll69_1436465824_134", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:26 reslen:213 locks:{ Global: { acquireCount: { r: 205, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 165 } }, Database: { acquireCount: { r: 26, w: 66, R: 38, W: 11 }, acquireWaitCount: { r: 2, w: 28, R: 28, W: 7 }, timeAcquiringMicros: { r: 35804, w: 204639, R: 74544, W: 58387 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1220ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.811-0400 m31100| 2015-07-09T14:17:05.809-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_413 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.856-0400 m31100| 2015-07-09T14:17:05.854-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.859-0400 m31200| 2015-07-09T14:17:05.858-0400 I COMMAND [conn37] CMD: drop 
db69.tmp.mrs.coll69_1436465825_136 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.861-0400 m31100| 2015-07-09T14:17:05.861-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_407 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.862-0400 m31100| 2015-07-09T14:17:05.861-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_407 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.864-0400 m31100| 2015-07-09T14:17:05.864-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_407 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.867-0400 m31200| 2015-07-09T14:17:05.866-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_255 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.868-0400 m31200| 2015-07-09T14:17:05.868-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_255 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.869-0400 m31200| 2015-07-09T14:17:05.869-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_255 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.869-0400 m31200| 2015-07-09T14:17:05.869-0400 I COMMAND [conn37] command db69.tmp.mrs.coll69_1436465825_136 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.869-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.869-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.870-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.870-0400 m31200| values...., out: "tmp.mrs.coll69_1436465825_136", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 174ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.871-0400 m31100| 2015-07-09T14:17:05.870-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_413 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.872-0400 m31100| 2015-07-09T14:17:05.872-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_413 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.872-0400 m31100| 2015-07-09T14:17:05.872-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_413 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.873-0400 m31100| 2015-07-09T14:17:05.872-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.876-0400 m31101| 2015-07-09T14:17:05.876-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_413 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.884-0400 m31102| 2015-07-09T14:17:05.884-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_413 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.888-0400 m31200| 2015-07-09T14:17:05.888-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.890-0400 m31102| 2015-07-09T14:17:05.890-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.891-0400 
m31101| 2015-07-09T14:17:05.891-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.892-0400 m31100| 2015-07-09T14:17:05.891-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465824_113 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.892-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.892-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.893-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.893-0400 m31100| values...., out: "tmp.mrs.coll69_1436465824_113", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:21 reslen:213 locks:{ Global: { acquireCount: { r: 195, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4275, W: 667 } }, Database: { acquireCount: { r: 26, w: 66, R: 33, W: 11 }, acquireWaitCount: { r: 4, w: 31, R: 26, W: 7 }, timeAcquiringMicros: { r: 43591, w: 196451, R: 100030, W: 73898 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1319ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.894-0400 m31202| 2015-07-09T14:17:05.891-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.894-0400 m31201| 2015-07-09T14:17:05.892-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465824_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.894-0400 m31200| 2015-07-09T14:17:05.892-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_256 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.894-0400 m31100| 2015-07-09T14:17:05.893-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_415 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.896-0400 m31100| 2015-07-09T14:17:05.896-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_414 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.900-0400 m31100| 2015-07-09T14:17:05.900-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.906-0400 m31100| 2015-07-09T14:17:05.906-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_410 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.906-0400 m31100| 2015-07-09T14:17:05.906-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_410 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.923-0400 m31100| 2015-07-09T14:17:05.922-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_410 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.928-0400 m31100| 2015-07-09T14:17:05.926-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465824_114 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.928-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.928-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.928-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.929-0400 m31100| values...., out: 
"tmp.mrs.coll69_1436465824_114", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:25 reslen:213 locks:{ Global: { acquireCount: { r: 203, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 13738, W: 8107 } }, Database: { acquireCount: { r: 26, w: 66, R: 37, W: 11 }, acquireWaitCount: { r: 5, w: 27, R: 26, W: 7 }, timeAcquiringMicros: { r: 8087, w: 243444, R: 58900, W: 57674 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1253ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.931-0400 m31100| 2015-07-09T14:17:05.931-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_416 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.977-0400 m31100| 2015-07-09T14:17:05.976-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_415 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.990-0400 m31100| 2015-07-09T14:17:05.990-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_415 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.991-0400 m31100| 2015-07-09T14:17:05.990-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_415 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.991-0400 m31100| 2015-07-09T14:17:05.991-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.996-0400 m31200| 2015-07-09T14:17:05.996-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:05.997-0400 m30998| 2015-07-09T14:17:05.996-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:05.994-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.002-0400 m31102| 2015-07-09T14:17:06.002-0400 I COMMAND [repl writer worker 14] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_415 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.002-0400 m31202| 2015-07-09T14:17:06.002-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.003-0400 m31200| 2015-07-09T14:17:06.003-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_257 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.004-0400 m31201| 2015-07-09T14:17:06.004-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.009-0400 m31100| 2015-07-09T14:17:06.009-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_417 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.010-0400 m31101| 2015-07-09T14:17:06.010-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_415 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.015-0400 m31100| 2015-07-09T14:17:06.015-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_416 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.017-0400 m31102| 2015-07-09T14:17:06.017-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.019-0400 m31100| 2015-07-09T14:17:06.019-0400 I COMMAND 
[conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_416 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.021-0400 m31100| 2015-07-09T14:17:06.019-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_416 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.021-0400 m31100| 2015-07-09T14:17:06.020-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.024-0400 m31100| 2015-07-09T14:17:06.024-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465824_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.024-0400 m31200| 2015-07-09T14:17:06.024-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.025-0400 m31101| 2015-07-09T14:17:06.025-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465824_113 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.031-0400 m31100| 2015-07-09T14:17:06.030-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_409 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.033-0400 m31100| 2015-07-09T14:17:06.032-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_409 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.034-0400 m31102| 2015-07-09T14:17:06.032-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_416 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.041-0400 m31102| 2015-07-09T14:17:06.040-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.042-0400 m31101| 2015-07-09T14:17:06.042-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_416 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.048-0400 m31101| 2015-07-09T14:17:06.047-0400 I COMMAND [repl writer worker 10] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.052-0400 m31200| 2015-07-09T14:17:06.051-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_258 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.058-0400 m31100| 2015-07-09T14:17:06.057-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_418 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.058-0400 m31202| 2015-07-09T14:17:06.058-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.064-0400 m31201| 2015-07-09T14:17:06.064-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465824_114 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.072-0400 m31100| 2015-07-09T14:17:06.072-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_409 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.088-0400 m31100| 2015-07-09T14:17:06.087-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465824_135 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.088-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.088-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.089-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.090-0400 m31100| values...., out: "tmp.mrs.coll69_1436465824_135", shardedFirstPass: true } planSummary: 
COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:29 reslen:213 locks:{ Global: { acquireCount: { r: 211, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 21959, w: 347, W: 3090 } }, Database: { acquireCount: { r: 26, w: 66, R: 41, W: 11 }, acquireWaitCount: { r: 10, w: 32, R: 29, W: 9 }, timeAcquiringMicros: { r: 30941, w: 251779, R: 83544, W: 186860 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1460ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.090-0400 m31100| 2015-07-09T14:17:06.089-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_419 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.167-0400 m31200| 2015-07-09T14:17:06.167-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465825_137 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.175-0400 m31200| 2015-07-09T14:17:06.175-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_256 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.179-0400 m31200| 2015-07-09T14:17:06.177-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_256 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.186-0400 m31200| 2015-07-09T14:17:06.185-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_256 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.190-0400 m31200| 2015-07-09T14:17:06.189-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465825_137 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.190-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.190-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.190-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.190-0400 m31200| values...., out: "tmp.mrs.coll69_1436465825_137", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 18, W: 4 }, timeAcquiringMicros: { w: 143285, W: 11915 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 297ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.203-0400 m31100| 2015-07-09T14:17:06.203-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_419 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.207-0400 m31200| 2015-07-09T14:17:06.207-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465826_115 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.208-0400 m31100| 2015-07-09T14:17:06.208-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_419 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.208-0400 m31100| 2015-07-09T14:17:06.208-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_419 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.209-0400 m31100| 2015-07-09T14:17:06.208-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 
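The mapReduce entries above with shardedFirstPass: true are the first phase of a sharded map/reduce: mongos asks each shard (test-rs0 via m31100, test-rs1 via m31200) to run the map and reduce locally into a per-run temp collection named tmp.mrs.coll69_<epoch>_<seq>. The log truncates the workload's real mapper and reducer ("this.has...", "values...."), so the bodies below are illustrative placeholders only; a minimal sketch of the command each shard receives:

    // Sketch of the first-phase command a shard executes (hypothetical bodies;
    // the real map/reduce functions are truncated in the log above).
    db.getSiblingDB("db69").runCommand({
        mapreduce: "coll69",
        map: function mapper() { emit(this.key, 1); },   // placeholder body
        reduce: function reducer(key, values) {
            var res = {};                                // first line shown in the log
            // ...aggregation logic elided ("values....") in the log...
            return res;
        },
        out: "tmp.mrs.coll69_1436465824_135",            // per-run temp collection
        shardedFirstPass: true                           // internal flag set by mongos
    });

The slow-op lines that follow each such command report its plan (planSummary: COUNT), yield count, lock statistics, and total time (1253ms and 1460ms above).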
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.209-0400 m31100| 2015-07-09T14:17:06.208-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.209-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.209-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.209-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.209-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.209-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.210-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465824_135", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465824_135", timeMillis: 1403, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465826000|13, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465824_135", timeMillis: 423, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465825000|48, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.210-0400 m31100| 2015-07-09T14:17:06.209-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465824_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.211-0400 m31102| 2015-07-09T14:17:06.211-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_419
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.213-0400 m31101| 2015-07-09T14:17:06.212-0400 I COMMAND [repl writer worker 15] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_419
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.215-0400 m31200| 2015-07-09T14:17:06.215-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_257
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.216-0400 m31200| 2015-07-09T14:17:06.215-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_257
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.228-0400 m31200| 2015-07-09T14:17:06.228-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_257
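The mapreduce.shardedfinish entry above (119ms) is the second phase: once every shard has produced its tmp.mrs.* collection, mongos tells one shard to merge them into the final output, here out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true }, i.e. a non-atomic merge into coll69 of a different database. At the workload level this presumably corresponds to a shell call of roughly this shape (hedged reconstruction; only the finalize body and the out document appear verbatim in the log):

    // mapper/reducer as in the previous sketch; finalize and out are quoted from the log.
    db.getSiblingDB("db69").coll69.mapReduce(mapper, reducer, {
        finalize: function finalizer(key, reducedValue) {
            return reducedValue;
        },
        out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true }
    });

nonAtomic: true lets the merge yield the database lock periodically instead of holding it for the whole post-processing step, which is why five of these workloads (map_reduce_merge_nonatomic0 through nonatomic4) can interleave in this section.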
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.229-0400 m31200| 2015-07-09T14:17:06.228-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465826_115 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.229-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.229-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.229-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.230-0400 m31200| values...., out: "tmp.mrs.coll69_1436465826_115", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 5, R: 10, W: 3 }, timeAcquiringMicros: { r: 1779, w: 11992, R: 19482, W: 6357 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 225ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.232-0400 m31200| 2015-07-09T14:17:06.230-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465824_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.233-0400 m31102| 2015-07-09T14:17:06.233-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465824_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.236-0400 m31201| 2015-07-09T14:17:06.235-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465824_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.236-0400 m31202| 2015-07-09T14:17:06.236-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465824_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.238-0400 m31200| 2015-07-09T14:17:06.237-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.238-0400 m31101| 2015-07-09T14:17:06.238-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465824_135
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.260-0400 m31100| 2015-07-09T14:17:06.260-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_420
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.307-0400 m31200| 2015-07-09T14:17:06.306-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.312-0400 m31200| 2015-07-09T14:17:06.311-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.312-0400 m31200| 2015-07-09T14:17:06.312-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.314-0400 m31200| 2015-07-09T14:17:06.314-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_258
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.325-0400 m31200| 2015-07-09T14:17:06.325-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465826_116 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.325-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.326-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.326-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.326-0400 m31200| values...., out: "tmp.mrs.coll69_1436465826_116", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 12598 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 9, R: 9, W: 5 }, timeAcquiringMicros: { w: 39637, R: 19043, W: 25559 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 280ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.372-0400 m31200| 2015-07-09T14:17:06.372-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.378-0400 m31200| 2015-07-09T14:17:06.377-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.378-0400 m31200| 2015-07-09T14:17:06.377-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.378-0400 m31200| 2015-07-09T14:17:06.378-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_259
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.379-0400 m31200| 2015-07-09T14:17:06.378-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465826_138 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.379-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.379-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.379-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.380-0400 m31200| values...., out: "tmp.mrs.coll69_1436465826_138", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 5342 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { R: 9 }, timeAcquiringMicros: { R: 5451 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 141ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.417-0400 m31100| 2015-07-09T14:17:06.415-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465825_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.423-0400 m31100| 2015-07-09T14:17:06.423-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_412
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.424-0400 m31100| 2015-07-09T14:17:06.423-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_412
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.426-0400 m31100| 2015-07-09T14:17:06.425-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_412
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.450-0400 m31100| 2015-07-09T14:17:06.449-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465825_136 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.450-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.451-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.451-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.452-0400 m31100| values...., out: "tmp.mrs.coll69_1436465825_136", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 11743, W: 716 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 4, w: 30, R: 13, W: 8 }, timeAcquiringMicros: { r: 41172, w: 424896, R: 33232, W: 30297 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 754ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.452-0400 m31100| 2015-07-09T14:17:06.450-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_421
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.501-0400 m31100| 2015-07-09T14:17:06.501-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465826_115
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.506-0400 m31100| 2015-07-09T14:17:06.506-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_417
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.506-0400 m31100| 2015-07-09T14:17:06.506-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_417
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.506-0400 m31100| 2015-07-09T14:17:06.506-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465825_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.512-0400 m31100| 2015-07-09T14:17:06.511-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_414
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.512-0400 m31100| 2015-07-09T14:17:06.512-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_414
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.512-0400 m31100| 2015-07-09T14:17:06.512-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_414
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.513-0400 m31100| 2015-07-09T14:17:06.513-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_417
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.515-0400 m31100| 2015-07-09T14:17:06.514-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465825_137 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.516-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.516-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.516-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.516-0400 m31100| values...., out: "tmp.mrs.coll69_1436465825_137", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 5476, w: 18120, W: 28338 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { w: 14, R: 15, W: 7 }, timeAcquiringMicros: { w: 145033, R: 90449, W: 23034 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 621ms
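Each slow-op line carries a locks:{} document: per lock resource (Global, Database, Collection, Metadata, oplog), acquireCount is how many times each mode was taken and timeAcquiringMicros how long acquisition waited, in microseconds; lowercase r/w are intent-shared/intent-exclusive modes, uppercase R/W are shared/exclusive. A throwaway helper (not part of the test) makes the wait totals easy to read:

    // Sum all timeAcquiringMicros sections of one parsed slow-op entry, in ms.
    function totalLockWaitMs(locks) {
        var micros = 0;
        Object.keys(locks).forEach(function (resource) {
            var waits = locks[resource].timeAcquiringMicros || {};
            Object.keys(waits).forEach(function (mode) { micros += waits[mode]; });
        });
        return micros / 1000;
    }

For the 754ms mapReduce above, the Global plus Database waits come to roughly 542ms (11743 + 716 + 41172 + 424896 + 33232 + 30297 microseconds), so most of that op's runtime was lock contention with the concurrent workloads rather than map/reduce work.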
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.516-0400 m31100| 2015-07-09T14:17:06.515-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465826_115 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.517-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.517-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.517-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.518-0400 m31100| values...., out: "tmp.mrs.coll69_1436465826_115", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:5 reslen:213 locks:{ Global: { acquireCount: { r: 163, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 4777, w: 16498, W: 9592 } }, Database: { acquireCount: { r: 26, w: 66, R: 17, W: 11 }, acquireWaitCount: { r: 1, w: 6, R: 17, W: 9 }, timeAcquiringMicros: { r: 250, w: 52122, R: 113498, W: 29451 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 512ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.518-0400 m31100| 2015-07-09T14:17:06.516-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_422
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.519-0400 m31100| 2015-07-09T14:17:06.517-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_423
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.543-0400 m31100| 2015-07-09T14:17:06.542-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_421
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.552-0400 m31100| 2015-07-09T14:17:06.550-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_421
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.552-0400 m31100| 2015-07-09T14:17:06.551-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_421
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.553-0400 m31100| 2015-07-09T14:17:06.551-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465825_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.560-0400 m31200| 2015-07-09T14:17:06.559-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465825_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.563-0400 m31201| 2015-07-09T14:17:06.562-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465825_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.564-0400 m31202| 2015-07-09T14:17:06.564-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465825_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.565-0400 m31200| 2015-07-09T14:17:06.565-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.568-0400 m31100| 2015-07-09T14:17:06.568-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_424
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.581-0400 m31101| 2015-07-09T14:17:06.580-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_421
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.586-0400 m31102| 2015-07-09T14:17:06.586-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_421
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.590-0400 m31101| 2015-07-09T14:17:06.589-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465825_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.594-0400 m31100| 2015-07-09T14:17:06.594-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_423
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.594-0400 m31100| 2015-07-09T14:17:06.594-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_422
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.601-0400 m31100| 2015-07-09T14:17:06.600-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_423
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.601-0400 m31100| 2015-07-09T14:17:06.600-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_423
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.601-0400 m31100| 2015-07-09T14:17:06.601-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465826_115
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.612-0400 m31102| 2015-07-09T14:17:06.609-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465825_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.612-0400 m31101| 2015-07-09T14:17:06.611-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_423
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.614-0400 m31100| 2015-07-09T14:17:06.613-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_422
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.615-0400 m31101| 2015-07-09T14:17:06.614-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_422
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.626-0400 m31200| 2015-07-09T14:17:06.626-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465826_115
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.626-0400 m31100| 2015-07-09T14:17:06.626-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.632-0400 m31102| 2015-07-09T14:17:06.631-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_423
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.632-0400 m31202| 2015-07-09T14:17:06.632-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465826_115
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.634-0400 m31201| 2015-07-09T14:17:06.633-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465826_115
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.637-0400 m31100| 2015-07-09T14:17:06.636-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_418
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.637-0400 m31100| 2015-07-09T14:17:06.637-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_418
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.638-0400 m31100| 2015-07-09T14:17:06.637-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_422
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.638-0400 m31100| 2015-07-09T14:17:06.637-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.638-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.638-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.638-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.639-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.639-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.640-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465825_137", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465825_137", timeMillis: 619, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465826000|113, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465825_137", timeMillis: 283, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465826000|24, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 23586 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 121ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.641-0400 m31102| 2015-07-09T14:17:06.637-0400 I COMMAND [repl writer worker 10] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_422
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.641-0400 m31100| 2015-07-09T14:17:06.639-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465825_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.641-0400 m31100| 2015-07-09T14:17:06.640-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_418
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.644-0400 m31200| 2015-07-09T14:17:06.644-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.652-0400 m31200| 2015-07-09T14:17:06.651-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465825_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.652-0400 m31100| 2015-07-09T14:17:06.651-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_425
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.653-0400 m31102| 2015-07-09T14:17:06.653-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465826_115
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.658-0400 m31101| 2015-07-09T14:17:06.655-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465826_115
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.660-0400 m31201| 2015-07-09T14:17:06.660-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465825_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.660-0400 m31202| 2015-07-09T14:17:06.659-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465825_137
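The shardedfinish entries also echo per-shard counters and their totals; the aggregate counts document is simply the field-wise sum of shardCounts. For the nonatomic2 merge above, test-rs0 contributed { input: 1015, emit: 1015, reduce: 80, output: 20 } and test-rs1 { input: 985, emit: 985, reduce: 79, output: 20 }, giving { input: 2000, emit: 2000, reduce: 159, output: 40 } as logged. An illustrative check:

    // Field-wise sum of the per-shard counters quoted above (illustrative only).
    var shardCounts = [
        { input: 1015, emit: 1015, reduce: 80, output: 20 },  // test-rs0
        { input: 985,  emit: 985,  reduce: 79, output: 20 }   // test-rs1
    ];
    var totals = shardCounts.reduce(function (acc, c) {
        Object.keys(c).forEach(function (k) { acc[k] = (acc[k] || 0) + c[k]; });
        return acc;
    }, {});
    // totals => { input: 2000, emit: 2000, reduce: 159, output: 40 }

So each pass maps 2000 documents and reduces them to 20 keys per shard, 40 first-pass rows feeding the merge.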
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.664-0400 m31100| 2015-07-09T14:17:06.664-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465826_116 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.664-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.665-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.665-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.665-0400 m31100| values...., out: "tmp.mrs.coll69_1436465826_116", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { w: 4, W: 1 }, timeAcquiringMicros: { w: 12770, W: 12950 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 7, w: 16, R: 15, W: 7 }, timeAcquiringMicros: { r: 58263, w: 103262, R: 79044, W: 85027 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 619ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.668-0400 m31100| 2015-07-09T14:17:06.665-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_427
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.669-0400 m31101| 2015-07-09T14:17:06.669-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465825_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.670-0400 m31102| 2015-07-09T14:17:06.670-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465825_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.672-0400 m31100| 2015-07-09T14:17:06.671-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_426
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.682-0400 m31200| 2015-07-09T14:17:06.682-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.743-0400 m31100| 2015-07-09T14:17:06.743-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_427
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.748-0400 m31100| 2015-07-09T14:17:06.748-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_427
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.748-0400 m31100| 2015-07-09T14:17:06.748-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_427
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.749-0400 m31100| 2015-07-09T14:17:06.748-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.752-0400 m31101| 2015-07-09T14:17:06.752-0400 I COMMAND [repl writer worker 4] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_427
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.770-0400 m31200| 2015-07-09T14:17:06.770-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.778-0400 m31101| 2015-07-09T14:17:06.778-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.784-0400 m31202| 2015-07-09T14:17:06.784-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.785-0400 m31201| 2015-07-09T14:17:06.784-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.791-0400 m31200| 2015-07-09T14:17:06.791-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.824-0400 m31102| 2015-07-09T14:17:06.824-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_427
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.824-0400 m31100| 2015-07-09T14:17:06.824-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_428
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.845-0400 m31102| 2015-07-09T14:17:06.844-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465826_116
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.902-0400 m31100| 2015-07-09T14:17:06.902-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:06.901-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.914-0400 m31200| 2015-07-09T14:17:06.913-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.919-0400 m31200| 2015-07-09T14:17:06.919-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.919-0400 m31200| 2015-07-09T14:17:06.919-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.922-0400 m31200| 2015-07-09T14:17:06.921-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_260
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.933-0400 m31200| 2015-07-09T14:17:06.932-0400 I COMMAND [conn37] command db69.tmp.mrs.coll69_1436465826_139 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.933-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.933-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.934-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.934-0400 m31200| values...., out: "tmp.mrs.coll69_1436465826_139", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 10789 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 16, R: 5, W: 4 }, timeAcquiringMicros: { w: 147043, R: 26153, W: 12507 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 368ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.972-0400 m31200| 2015-07-09T14:17:06.972-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465826_117
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.977-0400 m31200| 2015-07-09T14:17:06.976-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.977-0400 m31200| 2015-07-09T14:17:06.977-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_261
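The I SHARDING [LockPinger] lines above (from mongos m30998 and the shard primaries) are the legacy distributed-lock keepalive: every 30 seconds each member re-pings its document on the config server replica set test-configRS so its distributed locks remain valid. The pinger identity in the log appears to be cluster/host:port plus a startup epoch and a random component. Those pings land in the config database and could be inspected from a shell with something like the following (hypothetical diagnostic, not something this test runs):

    // Most recent distributed-lock pings recorded on the config servers.
    db.getSiblingDB("config").lockpings.find().sort({ ping: -1 }).limit(5);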
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.979-0400 m31200| 2015-07-09T14:17:06.979-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_261
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.982-0400 m31200| 2015-07-09T14:17:06.982-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465826_117 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.982-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.982-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.982-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.983-0400 m31200| values...., out: "tmp.mrs.coll69_1436465826_117", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 9355, W: 822 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 12, R: 11, W: 8 }, timeAcquiringMicros: { w: 59022, R: 62150, W: 19549 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 341ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.983-0400 m31200| 2015-07-09T14:17:06.983-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465826_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.987-0400 m31200| 2015-07-09T14:17:06.987-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.988-0400 m31200| 2015-07-09T14:17:06.987-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.988-0400 m31200| 2015-07-09T14:17:06.988-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_262
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.989-0400 m31200| 2015-07-09T14:17:06.989-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465826_140 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.989-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.989-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.989-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:06.990-0400 m31200| values...., out: "tmp.mrs.coll69_1436465826_140", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2 }, timeAcquiringMicros: { r: 5377, w: 1779 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 11, R: 11, W: 4 }, timeAcquiringMicros: { r: 13300, w: 61462, R: 56090, W: 22279 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 324ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.004-0400 m31200| 2015-07-09T14:17:07.004-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465826_118
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.009-0400 m31200| 2015-07-09T14:17:07.009-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.010-0400 m31200| 2015-07-09T14:17:07.010-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.011-0400 m31200| 2015-07-09T14:17:07.011-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_263
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.011-0400 m31200| 2015-07-09T14:17:07.011-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465826_118 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.012-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.012-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.012-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.012-0400 m31200| values...., out: "tmp.mrs.coll69_1436465826_118", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 270 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 2, R: 12, W: 5 }, timeAcquiringMicros: { w: 16373, R: 10498, W: 16999 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 221ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.053-0400 m31100| 2015-07-09T14:17:07.053-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.059-0400 m31100| 2015-07-09T14:17:07.059-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_420
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.059-0400 m31100| 2015-07-09T14:17:07.059-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_420
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.066-0400 m31100| 2015-07-09T14:17:07.065-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_420
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.067-0400 m31100| 2015-07-09T14:17:07.067-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465826_138 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.068-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.068-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.068-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.069-0400 m31100| values...., out: "tmp.mrs.coll69_1436465826_138", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 4880, w: 11539, W: 13678 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 4, w: 34, R: 13, W: 9 }, timeAcquiringMicros: { r: 35852, w: 419188, R: 65100, W: 46876 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 830ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.070-0400 m31100| 2015-07-09T14:17:07.068-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_429
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.114-0400 m31100| 2015-07-09T14:17:07.112-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_429
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.114-0400 m31100| 2015-07-09T14:17:07.113-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_429
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.114-0400 m31100| 2015-07-09T14:17:07.113-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_429
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.115-0400 m31100| 2015-07-09T14:17:07.114-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.118-0400 m31200| 2015-07-09T14:17:07.118-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.119-0400 m31101| 2015-07-09T14:17:07.119-0400 I COMMAND [repl writer worker 13] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_429
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.119-0400 m31102| 2015-07-09T14:17:07.119-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_429
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.122-0400 m31201| 2015-07-09T14:17:07.122-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.122-0400 m31202| 2015-07-09T14:17:07.122-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.122-0400 m31100| 2015-07-09T14:17:07.122-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.123-0400 m31102| 2015-07-09T14:17:07.123-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.123-0400 m31101| 2015-07-09T14:17:07.123-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465826_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.126-0400 m31100| 2015-07-09T14:17:07.126-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_424
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.126-0400 m31100| 2015-07-09T14:17:07.126-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_424
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.130-0400 m31200| 2015-07-09T14:17:07.127-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.130-0400 m31100| 2015-07-09T14:17:07.128-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_424
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.131-0400 m31100| 2015-07-09T14:17:07.131-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_430
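Two families of temporaries churn through this section: db69.tmp.mr.coll69_<n> are the numbered scratch collections a single map/reduce attempt builds into, and db69.tmp.mrs.coll69_<epoch>_<seq> hold each shard's first-pass results until the shardedfinish merge drops them; every drop is then replayed on the secondaries (m31101/m31102 and m31201/m31202), which is what the repeated "repl writer worker" lines are. A quick way to spot leaked temporaries after a run, as a hypothetical check rather than anything this test executes:

    // List any map/reduce temp collections left behind in db69.
    db.getSiblingDB("db69").getCollectionNames().filter(function (name) {
        return name.indexOf("tmp.mr") === 0;
    });

An empty array afterwards means every scratch and first-pass collection was cleaned up.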
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.132-0400 m31100| 2015-07-09T14:17:07.131-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465826_139 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.133-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.133-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.133-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.134-0400 m31100| values...., out: "tmp.mrs.coll69_1436465826_139", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 23873, w: 13847, W: 1494 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 21, R: 13, W: 7 }, timeAcquiringMicros: { r: 6147, w: 192898, R: 86526, W: 10061 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 566ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.135-0400 m31100| 2015-07-09T14:17:07.133-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_431
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.212-0400 m31100| 2015-07-09T14:17:07.212-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_431
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.213-0400 m31100| 2015-07-09T14:17:07.213-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_431
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.214-0400 m31100| 2015-07-09T14:17:07.213-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_431
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.214-0400 m31100| 2015-07-09T14:17:07.214-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.219-0400 m31200| 2015-07-09T14:17:07.218-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.229-0400 m31102| 2015-07-09T14:17:07.223-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_431
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.229-0400 m31202| 2015-07-09T14:17:07.224-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.229-0400 m31201| 2015-07-09T14:17:07.229-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.231-0400 m31200| 2015-07-09T14:17:07.231-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.233-0400 m31102| 2015-07-09T14:17:07.233-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.235-0400 m31101| 2015-07-09T14:17:07.233-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_431
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.235-0400 m31100| 2015-07-09T14:17:07.235-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_432
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.242-0400 m31101| 2015-07-09T14:17:07.241-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465826_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.296-0400 m31200| 2015-07-09T14:17:07.295-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465827_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.306-0400 m31200| 2015-07-09T14:17:07.304-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.306-0400 m31200| 2015-07-09T14:17:07.305-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.306-0400 m31200| 2015-07-09T14:17:07.306-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_264
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.316-0400 m31200| 2015-07-09T14:17:07.315-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465827_141 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.316-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.316-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.316-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.317-0400 m31200| values...., out: "tmp.mrs.coll69_1436465827_141", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 6662 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 8, W: 1 }, timeAcquiringMicros: { w: 40051, W: 9105 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 188ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.317-0400 m31200| 2015-07-09T14:17:07.316-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:07.314-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.320-0400 m31100| 2015-07-09T14:17:07.319-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465826_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.325-0400 m31100| 2015-07-09T14:17:07.325-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_426
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.325-0400 m31100| 2015-07-09T14:17:07.325-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_426
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.325-0400 m31100| 2015-07-09T14:17:07.325-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465826_117
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.331-0400 m31100| 2015-07-09T14:17:07.330-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_425
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.331-0400 m31100| 2015-07-09T14:17:07.331-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_425
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.332-0400 m31100| 2015-07-09T14:17:07.331-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_425
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.332-0400 m31100| 2015-07-09T14:17:07.332-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_426
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.341-0400 m31100| 2015-07-09T14:17:07.340-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465826_117 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.341-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.341-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.341-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.342-0400 m31100| values...., out: "tmp.mrs.coll69_1436465826_117", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 3971, w: 9751, W: 84889 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 8, w: 29, R: 15, W: 7 }, timeAcquiringMicros: { r: 8603, w: 142373, R: 89123, W: 68367 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 700ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.345-0400 m31100| 2015-07-09T14:17:07.344-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465826_118
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.346-0400 m31100| 2015-07-09T14:17:07.345-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465826_140 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.346-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.346-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.346-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.347-0400 m31100| values...., out: "tmp.mrs.coll69_1436465826_140", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 8, w: 8, W: 1 }, timeAcquiringMicros: { r: 41773, w: 19930, W: 5812 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 9, w: 29, R: 14, W: 8 }, timeAcquiringMicros: { r: 67536, w: 142268, R: 47771, W: 68389 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 681ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.349-0400 m31100| 2015-07-09T14:17:07.349-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_433
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.350-0400 m31100| 2015-07-09T14:17:07.349-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_434
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.353-0400 m31100| 2015-07-09T14:17:07.350-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_428
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.353-0400 m31100| 2015-07-09T14:17:07.350-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_428
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.377-0400 m31200| 2015-07-09T14:17:07.376-0400 I COMMAND [conn37] CMD: drop db69.tmp.mrs.coll69_1436465827_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.381-0400 m31100| 2015-07-09T14:17:07.381-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_428
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.385-0400 m31200| 2015-07-09T14:17:07.385-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.386-0400 m31200| 2015-07-09T14:17:07.385-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.386-0400 m31200| 2015-07-09T14:17:07.386-0400 I COMMAND [conn37] CMD: drop db69.tmp.mr.coll69_265
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.390-0400 m31200| 2015-07-09T14:17:07.387-0400 I COMMAND [conn37] command db69.tmp.mrs.coll69_1436465827_142 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.390-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.390-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.390-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.391-0400 m31200| values...., out: "tmp.mrs.coll69_1436465827_142", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 10165 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { R: 6, W: 5 }, timeAcquiringMicros: { R: 4160, W: 2108 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 156ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.415-0400 m31100| 2015-07-09T14:17:07.414-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465826_118 command: mapReduce { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.415-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.415-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.415-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.417-0400 m31100| values...., out: "tmp.mrs.coll69_1436465826_118", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 6, w: 5 }, timeAcquiringMicros: { r: 45728, w: 7192 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 6, w: 26, R: 11, W: 7 }, timeAcquiringMicros: { r: 25108, w: 165840, R: 40023, W: 84274 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 624ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.420-0400 m31100| 2015-07-09T14:17:07.420-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_435
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.467-0400 m31100| 2015-07-09T14:17:07.467-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_433
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.468-0400 m31100| 2015-07-09T14:17:07.468-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_433
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.468-0400 m31100| 2015-07-09T14:17:07.468-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_433
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.469-0400 m31100| 2015-07-09T14:17:07.469-0400 I COMMAND [conn179] command map_reduce_merge_nonatomic3.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.469-0400 m31101| 2015-07-09T14:17:07.469-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_433
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.470-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.470-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.470-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.470-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.470-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.471-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic3", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465826_117", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465826_117", timeMillis: 690, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|134, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465826_117", timeMillis: 336, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465826000|150, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 3711 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 127ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.471-0400 m31100| 2015-07-09T14:17:07.469-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_434
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.472-0400 m31100| 2015-07-09T14:17:07.469-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_434
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.472-0400 m31100| 2015-07-09T14:17:07.470-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_434
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.472-0400 m31100| 2015-07-09T14:17:07.470-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465826_117
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.472-0400 m31100| 2015-07-09T14:17:07.470-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.472-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.472-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.472-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.473-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.473-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.474-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465826_140", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465826_140", timeMillis: 661, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|133, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465826_140", timeMillis: 323, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465826000|160, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 1351 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 123ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.474-0400 m31100| 2015-07-09T14:17:07.471-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465826_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.474-0400 m31102| 2015-07-09T14:17:07.471-0400 I COMMAND [repl writer worker 9] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_433
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.474-0400 m31101| 2015-07-09T14:17:07.472-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_434
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.474-0400 m31102| 2015-07-09T14:17:07.474-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_434
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.480-0400 m31200| 2015-07-09T14:17:07.479-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465826_117
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.482-0400 m31200| 2015-07-09T14:17:07.481-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465826_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.482-0400 m31100| 2015-07-09T14:17:07.482-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465827_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.482-0400 m31101| 2015-07-09T14:17:07.482-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465826_117
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.483-0400
m31200| 2015-07-09T14:17:07.483-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_266 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.484-0400 m31202| 2015-07-09T14:17:07.484-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465826_117 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.484-0400 m31201| 2015-07-09T14:17:07.484-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465826_117 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.488-0400 m31201| 2015-07-09T14:17:07.488-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465826_140 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.491-0400 m31202| 2015-07-09T14:17:07.491-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465826_140 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.493-0400 m31100| 2015-07-09T14:17:07.491-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_430 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.493-0400 m31100| 2015-07-09T14:17:07.491-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_430 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.493-0400 m31100| 2015-07-09T14:17:07.492-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_436 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.493-0400 m31101| 2015-07-09T14:17:07.492-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465826_140 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.493-0400 m31102| 2015-07-09T14:17:07.493-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465826_117 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.495-0400 m31100| 2015-07-09T14:17:07.495-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_430 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.497-0400 m31100| 2015-07-09T14:17:07.497-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_435 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.497-0400 m31200| 2015-07-09T14:17:07.497-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_267 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.498-0400 m31100| 2015-07-09T14:17:07.498-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_435 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.499-0400 m31100| 2015-07-09T14:17:07.498-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_435 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.500-0400 m31102| 2015-07-09T14:17:07.498-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465826_140 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.500-0400 m31100| 2015-07-09T14:17:07.499-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465826_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.502-0400 m31100| 2015-07-09T14:17:07.502-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_437 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.509-0400 m31200| 2015-07-09T14:17:07.509-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465826_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.509-0400 m31100| 2015-07-09T14:17:07.509-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465827_141 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.510-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function 
reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.510-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.510-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.511-0400 m31100| values...., out: "tmp.mrs.coll69_1436465827_141", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 5 }, timeAcquiringMicros: { r: 30232 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 7, R: 12, W: 7 }, timeAcquiringMicros: { r: 6823, w: 94230, R: 32501, W: 23440 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 381ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.511-0400 m31101| 2015-07-09T14:17:07.509-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_435 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.512-0400 m31100| 2015-07-09T14:17:07.510-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_438 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.517-0400 m31102| 2015-07-09T14:17:07.517-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_435 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.549-0400 m31202| 2015-07-09T14:17:07.548-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465826_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.553-0400 m31200| 2015-07-09T14:17:07.553-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_268 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.556-0400 m31201| 2015-07-09T14:17:07.554-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465826_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.564-0400 m31101| 2015-07-09T14:17:07.563-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465826_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.565-0400 m31102| 2015-07-09T14:17:07.565-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465826_118 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.578-0400 m31100| 2015-07-09T14:17:07.578-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_439 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.647-0400 m31100| 2015-07-09T14:17:07.645-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_438 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.655-0400 m31100| 2015-07-09T14:17:07.654-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_438 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.656-0400 m31100| 2015-07-09T14:17:07.655-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_438 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.657-0400 m31100| 2015-07-09T14:17:07.656-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.657-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.659-0400 m31100| var res = {}; 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.659-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.659-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.660-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.662-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465827_141", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465827_141", timeMillis: 363, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|228, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465827_141", timeMillis: 177, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|45, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 146ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.662-0400 m31100| 2015-07-09T14:17:07.657-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465827_141 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.684-0400 m31101| 2015-07-09T14:17:07.683-0400 I COMMAND [repl writer worker 1] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_438 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.685-0400 m31102| 2015-07-09T14:17:07.684-0400 I COMMAND [repl writer worker 6] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_438 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.685-0400 m31200| 2015-07-09T14:17:07.685-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465827_141 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.693-0400 m31101| 2015-07-09T14:17:07.692-0400 I COMMAND [repl writer worker 7] CMD: drop db69.tmp.mrs.coll69_1436465827_141 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.698-0400 m31201| 2015-07-09T14:17:07.698-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465827_141 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.699-0400 m31202| 2015-07-09T14:17:07.699-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465827_141 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.703-0400 m31102| 2015-07-09T14:17:07.702-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465827_141 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.706-0400 m31200| 2015-07-09T14:17:07.705-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_269 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.719-0400 m31100| 2015-07-09T14:17:07.719-0400 I COMMAND [conn49] CMD: drop 
db69.tmp.mr.coll69_440 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.887-0400 m31200| 2015-07-09T14:17:07.887-0400 I COMMAND [conn41] CMD: drop db69.tmp.mrs.coll69_1436465827_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.892-0400 m31200| 2015-07-09T14:17:07.891-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_266 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.892-0400 m31200| 2015-07-09T14:17:07.891-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_266 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.894-0400 m31200| 2015-07-09T14:17:07.893-0400 I COMMAND [conn41] CMD: drop db69.tmp.mr.coll69_266 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.898-0400 m31200| 2015-07-09T14:17:07.898-0400 I COMMAND [conn41] command db69.tmp.mrs.coll69_1436465827_119 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.899-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.899-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.899-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.899-0400 m31200| values...., out: "tmp.mrs.coll69_1436465827_119", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 320 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 1, w: 13, R: 11, W: 4 }, timeAcquiringMicros: { r: 2313, w: 90066, R: 87712, W: 7058 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 415ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.901-0400 m31200| 2015-07-09T14:17:07.900-0400 I COMMAND [conn52] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.908-0400 m31200| 2015-07-09T14:17:07.905-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_267 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.908-0400 m31200| 2015-07-09T14:17:07.906-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_267 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.909-0400 m31200| 2015-07-09T14:17:07.906-0400 I COMMAND [conn52] CMD: drop db69.tmp.mr.coll69_267 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.909-0400 m31200| 2015-07-09T14:17:07.907-0400 I COMMAND [conn52] command db69.tmp.mrs.coll69_1436465827_143 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.909-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.910-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.910-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.911-0400 m31200| values...., out: "tmp.mrs.coll69_1436465827_143", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5596, W: 2279 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { 
r: 3, w: 13, R: 12, W: 7 }, timeAcquiringMicros: { r: 3942, w: 59226, R: 58847, W: 20554 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 414ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.911-0400 m31200| 2015-07-09T14:17:07.909-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465827_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.916-0400 m31200| 2015-07-09T14:17:07.916-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_268 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.917-0400 m31200| 2015-07-09T14:17:07.916-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_268 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.917-0400 m31200| 2015-07-09T14:17:07.917-0400 I COMMAND [conn32] CMD: drop db69.tmp.mr.coll69_268 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.918-0400 m31200| 2015-07-09T14:17:07.918-0400 I COMMAND [conn32] command db69.tmp.mrs.coll69_1436465827_120 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.918-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.918-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.918-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.919-0400 m31200| values...., out: "tmp.mrs.coll69_1436465827_120", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 10694, W: 172 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 5, w: 10, R: 10, W: 8 }, timeAcquiringMicros: { r: 27850, w: 52589, R: 39583, W: 24666 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 396ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.927-0400 m31200| 2015-07-09T14:17:07.927-0400 I COMMAND [conn80] CMD: drop db69.tmp.mrs.coll69_1436465827_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.934-0400 m31200| 2015-07-09T14:17:07.933-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_269 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.934-0400 m31200| 2015-07-09T14:17:07.933-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_269 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.935-0400 m31200| 2015-07-09T14:17:07.935-0400 I COMMAND [conn80] CMD: drop db69.tmp.mr.coll69_269 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.935-0400 m31200| 2015-07-09T14:17:07.935-0400 I COMMAND [conn80] command db69.tmp.mrs.coll69_1436465827_144 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.936-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.936-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.936-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.937-0400 m31200| values...., out: "tmp.mrs.coll69_1436465827_144", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 
numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 10025 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 11, R: 10, W: 4 }, timeAcquiringMicros: { w: 34180, R: 12295, W: 31373 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 235ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.949-0400 m31100| 2015-07-09T14:17:07.949-0400 I COMMAND [conn177] CMD: drop db69.tmp.mrs.coll69_1436465827_142 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.956-0400 m31100| 2015-07-09T14:17:07.955-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_432 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.956-0400 m31100| 2015-07-09T14:17:07.956-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_432 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.970-0400 m31100| 2015-07-09T14:17:07.970-0400 I COMMAND [conn177] CMD: drop db69.tmp.mr.coll69_432 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.976-0400 m31100| 2015-07-09T14:17:07.976-0400 I COMMAND [conn177] command db69.tmp.mrs.coll69_1436465827_142 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.977-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.977-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.977-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.979-0400 m31100| values...., out: "tmp.mrs.coll69_1436465827_142", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 3, W: 1 }, timeAcquiringMicros: { r: 28599, w: 1998, W: 499 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 31, R: 13, W: 8 }, timeAcquiringMicros: { w: 419322, R: 45855, W: 30883 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 745ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.980-0400 m31100| 2015-07-09T14:17:07.977-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_441 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:07.995-0400 m31100| 2015-07-09T14:17:07.994-0400 I COMMAND [conn179] CMD: drop db69.tmp.mrs.coll69_1436465827_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.003-0400 m31100| 2015-07-09T14:17:08.002-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_436 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.003-0400 m31100| 2015-07-09T14:17:08.002-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_436 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.003-0400 m31100| 2015-07-09T14:17:08.002-0400 I COMMAND [conn185] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.016-0400 m31100| 2015-07-09T14:17:08.015-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_437 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.016-0400 m31100| 2015-07-09T14:17:08.015-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_437 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:08.018-0400 m31100| 2015-07-09T14:17:08.017-0400 I COMMAND [conn185] CMD: drop db69.tmp.mr.coll69_437 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.022-0400 m31100| 2015-07-09T14:17:08.020-0400 I COMMAND [conn179] CMD: drop db69.tmp.mr.coll69_436 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.022-0400 m31100| 2015-07-09T14:17:08.021-0400 I COMMAND [conn185] command db69.tmp.mrs.coll69_1436465827_143 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.023-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.023-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.023-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.023-0400 m31100| values...., out: "tmp.mrs.coll69_1436465827_143", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:6 reslen:213 locks:{ Global: { acquireCount: { r: 165, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 7334, W: 17641 } }, Database: { acquireCount: { r: 26, w: 66, R: 18, W: 11 }, acquireWaitCount: { r: 5, w: 9, R: 18, W: 6 }, timeAcquiringMicros: { r: 10419, w: 100567, R: 110329, W: 19540 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 528ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.024-0400 m31100| 2015-07-09T14:17:08.024-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_442 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.025-0400 m31100| 2015-07-09T14:17:08.024-0400 I COMMAND [conn179] command db69.tmp.mrs.coll69_1436465827_119 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.025-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.025-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.025-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.027-0400 m31100| values...., out: "tmp.mrs.coll69_1436465827_119", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 14612, w: 13825, W: 8749 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 5, w: 15, R: 15, W: 9 }, timeAcquiringMicros: { r: 2354, w: 93095, R: 94656, W: 30324 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 540ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.027-0400 m31100| 2015-07-09T14:17:08.025-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_443 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.062-0400 m31100| 2015-07-09T14:17:08.062-0400 I COMMAND [conn191] CMD: drop db69.tmp.mrs.coll69_1436465827_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.070-0400 m31100| 2015-07-09T14:17:08.069-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_439 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:08.071-0400 m31100| 2015-07-09T14:17:08.071-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_439 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.074-0400 m31100| 2015-07-09T14:17:08.074-0400 I COMMAND [conn191] CMD: drop db69.tmp.mr.coll69_439 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.078-0400 m31100| 2015-07-09T14:17:08.077-0400 I COMMAND [conn191] command db69.tmp.mrs.coll69_1436465827_120 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.078-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.078-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.078-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.079-0400 m31100| values...., out: "tmp.mrs.coll69_1436465827_120", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 24404, w: 13793, W: 33619 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 5, w: 10, R: 16, W: 9 }, timeAcquiringMicros: { r: 39880, w: 52170, R: 62619, W: 65231 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 556ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.085-0400 m31100| 2015-07-09T14:17:08.084-0400 I COMMAND [conn49] CMD: drop db69.tmp.mrs.coll69_1436465827_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.086-0400 m31100| 2015-07-09T14:17:08.085-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_444 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.090-0400 m31100| 2015-07-09T14:17:08.090-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_440 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.090-0400 m31100| 2015-07-09T14:17:08.090-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_440 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.107-0400 m31100| 2015-07-09T14:17:08.107-0400 I COMMAND [conn49] CMD: drop db69.tmp.mr.coll69_440 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.116-0400 m31100| 2015-07-09T14:17:08.114-0400 I COMMAND [conn49] command db69.tmp.mrs.coll69_1436465827_144 command: mapReduce { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.117-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.117-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.117-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.118-0400 m31100| values...., out: "tmp.mrs.coll69_1436465827_144", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3 }, timeAcquiringMicros: { r: 7763, w: 52612 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 1, w: 12, R: 14, W: 5 }, timeAcquiringMicros: { r: 2961, w: 37355, R: 47700, W: 41704 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { 
acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 413ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.119-0400 m31100| 2015-07-09T14:17:08.118-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_445 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.130-0400 m31100| 2015-07-09T14:17:08.130-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_441 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.139-0400 m31100| 2015-07-09T14:17:08.138-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_441 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.139-0400 m31100| 2015-07-09T14:17:08.138-0400 I COMMAND [conn177] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_441 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.140-0400 m31100| 2015-07-09T14:17:08.139-0400 I COMMAND [conn177] command map_reduce_merge_nonatomic0.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.140-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.140-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.140-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.140-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.140-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.142-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic0", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465827_142", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465827_142", timeMillis: 725, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|287, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465827_142", timeMillis: 154, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|66, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 4 }, timeAcquiringMicros: { w: 65033 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 162ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.142-0400 m31100| 2015-07-09T14:17:08.139-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465827_142 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.145-0400 m31200| 2015-07-09T14:17:08.144-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465827_142 
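
(Editor's note) The mapReduce and mapreduce.shardedfinish entries above show only the head of each JS function, because the shell elides long command bodies ("this.has...", "values...."). As a hedged sketch of the kind of call that produces them: the input namespace db69.coll69, the finalizer, and the out-spec are taken verbatim from the untruncated parts of the log, while everything inside mapper/reducer past the truncation points is an illustrative assumption, not the workload's actual code.

    // Hedged reconstruction of the nonatomic-merge mapReduce seen in this log.
    // Only the function heads, the finalizer, and the out-spec appear verbatim
    // in the entries above; the emit payload and the reduce body are assumed.
    var mapper = function mapper() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            // assumed emit; the log truncates the body at "this.has..."
            emit(this.key, { count: 1 });
        }
    };
    var reducer = function reducer(key, values) {
        var res = {};
        // assumed body (elided in the log): fold the emitted values into res
        res.count = values.reduce(function (acc, v) { return acc + v.count; }, 0);
        return res;
    };
    var finalizer = function finalizer(key, reducedValue) {
        return reducedValue;
    };
    db.getSiblingDB('db69').coll69.mapReduce(mapper, reducer, {
        finalize: finalizer,
        out: { merge: 'coll69', db: 'map_reduce_merge_nonatomic3', nonAtomic: true }
    });

Against a sharded cluster, one such call fans out as the two kinds of entries logged here: a shardedFirstPass mapReduce on each shard that writes a tmp.mrs.coll69_* collection, then a mapreduce.shardedfinish on the output database's primary shard that merges those temporaries into coll69.
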
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.147-0400 m31100| 2015-07-09T14:17:08.146-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_442 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.149-0400 m31202| 2015-07-09T14:17:08.148-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465827_142 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.152-0400 m30998| 2015-07-09T14:17:08.152-0400 I NETWORK [conn450] end connection 127.0.0.1:64027 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.152-0400 m31100| 2015-07-09T14:17:08.152-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_442 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.153-0400 m31100| 2015-07-09T14:17:08.153-0400 I COMMAND [conn185] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_442 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.154-0400 m31100| 2015-07-09T14:17:08.153-0400 I COMMAND [conn185] command map_reduce_merge_nonatomic2.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.154-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.154-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.154-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.154-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.154-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.155-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic2", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465827_143", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465827_143", timeMillis: 523, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465828000|2, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465827_143", timeMillis: 413, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|129, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 5403 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.155-0400 m31100| 2015-07-09T14:17:08.153-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.155-0400 m31100| 
2015-07-09T14:17:08.153-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_443 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.156-0400 m31201| 2015-07-09T14:17:08.156-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465827_142 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.167-0400 m31100| 2015-07-09T14:17:08.165-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_443 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.167-0400 m31100| 2015-07-09T14:17:08.166-0400 I COMMAND [conn179] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_443 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.168-0400 m31100| 2015-07-09T14:17:08.166-0400 I COMMAND [conn179] command map_reduce_merge_nonatomic3.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.168-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.168-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.168-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.168-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.168-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.170-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic3", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465827_119", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465827_119", timeMillis: 518, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465828000|1, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465827_119", timeMillis: 408, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|125, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 4818 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 140ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.170-0400 m31200| 2015-07-09T14:17:08.165-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.171-0400 m31100| 2015-07-09T14:17:08.166-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465827_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.171-0400 m31200| 2015-07-09T14:17:08.169-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465827_119 
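
(Editor's note) The CMD: drop entries above are the cleanup of those per-phase scratch collections: db69.tmp.mrs.coll69_<ts>_<n> from the first pass on each shard, and <outputDb>.tmp.mr.coll69_<n> from the finish phase, with the repl writer worker lines on m31101/m31102/m31201/m31202 showing the same drops replicating to the secondaries. A hypothetical diagnostic one-liner, not part of the test, for spotting temporaries that have not been dropped yet on whichever node the shell is connected to:

    // Hypothetical diagnostic (not from the test): list any map/reduce scratch
    // collections still present in db69.
    db.getSiblingDB('db69').getCollectionNames().filter(function (name) {
        return name.indexOf('tmp.mr') === 0; // matches tmp.mr.* and tmp.mrs.*
    });
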
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.176-0400 m31201| 2015-07-09T14:17:08.176-0400 I COMMAND [repl writer worker 5] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.185-0400 m31202| 2015-07-09T14:17:08.177-0400 I COMMAND [repl writer worker 11] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.444-0400 m31100| 2015-07-09T14:17:08.180-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_444 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.444-0400 m30998| 2015-07-09T14:17:08.180-0400 I NETWORK [conn451] end connection 127.0.0.1:64028 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.444-0400 m31102| 2015-07-09T14:17:08.181-0400 I COMMAND [repl writer worker 3] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_441 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.444-0400 m31100| 2015-07-09T14:17:08.186-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_444 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.444-0400 m31100| 2015-07-09T14:17:08.186-0400 I COMMAND [conn191] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_444 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.444-0400 m30999| 2015-07-09T14:17:08.190-0400 I NETWORK [conn451] end connection 127.0.0.1:64029 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.445-0400 m31102| 2015-07-09T14:17:08.190-0400 I COMMAND [repl writer worker 9] CMD: drop db69.tmp.mrs.coll69_1436465827_142 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.445-0400 m31100| 2015-07-09T14:17:08.191-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_445 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.445-0400 m31100| 2015-07-09T14:17:08.192-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_445 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.445-0400 m31100| 2015-07-09T14:17:08.192-0400 I COMMAND [conn49] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_445 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.445-0400 m31102| 2015-07-09T14:17:08.198-0400 I COMMAND [repl writer worker 5] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_442 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.446-0400 m31100| 2015-07-09T14:17:08.206-0400 I COMMAND [conn191] command map_reduce_merge_nonatomic1.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.446-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.446-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.446-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.446-0400 m31100| values...., finalize: function finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.446-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.447-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic1", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465827_120", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465827_120", 
timeMillis: 549, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465828000|16, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465827_120", timeMillis: 395, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|137, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 4935 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 125ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.448-0400 m31100| 2015-07-09T14:17:08.206-0400 I COMMAND [conn38] CMD: drop db69.tmp.mrs.coll69_1436465827_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.448-0400 m31102| 2015-07-09T14:17:08.208-0400 I COMMAND [repl writer worker 2] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.448-0400 m31200| 2015-07-09T14:17:08.210-0400 I COMMAND [conn64] CMD: drop db69.tmp.mrs.coll69_1436465827_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.448-0400 m31102| 2015-07-09T14:17:08.212-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_443 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.448-0400 m31202| 2015-07-09T14:17:08.213-0400 I COMMAND [repl writer worker 4] CMD: drop db69.tmp.mrs.coll69_1436465827_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.448-0400 m31201| 2015-07-09T14:17:08.213-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465827_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.448-0400 m31101| 2015-07-09T14:17:08.221-0400 I COMMAND [repl writer worker 8] CMD: drop map_reduce_merge_nonatomic0.tmp.mr.coll69_441 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m31102| 2015-07-09T14:17:08.222-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465827_119 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m30999| 2015-07-09T14:17:08.224-0400 I NETWORK [conn450] end connection 127.0.0.1:64025 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m31100| 2015-07-09T14:17:08.229-0400 I COMMAND [conn49] command map_reduce_merge_nonatomic4.coll69 command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll69", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m31100| values...., finalize: function 
finalizer(key, reducedValue) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.449-0400 m31100| return reducedValue; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.450-0400 m31100| }, out: { merge: "coll69", db: "map_reduce_merge_nonatomic4", nonAtomic: true } }, inputDB: "db69", shardedOutputCollection: "tmp.mrs.coll69_1436465827_144", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll69_1436465827_144", timeMillis: 390, counts: { input: 1015, emit: 1015, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465828000|43, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll69_1436465827_144", timeMillis: 233, counts: { input: 985, emit: 985, reduce: 79, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465827000|158, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 1015, emit: 1015, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 985, emit: 985, reduce: 79, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 159 } } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:303 locks:{ Global: { acquireCount: { r: 76, w: 68 } }, Database: { acquireCount: { r: 4, w: 44, W: 25 } }, Collection: { acquireCount: { r: 4, w: 23 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_command 110ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.450-0400 m31102| 2015-07-09T14:17:08.230-0400 I COMMAND [repl writer worker 7] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_444 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m31100| 2015-07-09T14:17:08.230-0400 I COMMAND [conn32] CMD: drop db69.tmp.mrs.coll69_1436465827_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m31200| 2015-07-09T14:17:08.231-0400 I COMMAND [conn65] CMD: drop db69.tmp.mrs.coll69_1436465827_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m31101| 2015-07-09T14:17:08.232-0400 I COMMAND [repl writer worker 14] CMD: drop db69.tmp.mrs.coll69_1436465827_142 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m31201| 2015-07-09T14:17:08.232-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465827_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m31202| 2015-07-09T14:17:08.232-0400 I COMMAND [repl writer worker 12] CMD: drop db69.tmp.mrs.coll69_1436465827_120 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m31101| 2015-07-09T14:17:08.237-0400 I COMMAND [repl writer worker 0] CMD: drop map_reduce_merge_nonatomic2.tmp.mr.coll69_442 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m31101| 2015-07-09T14:17:08.243-0400 I COMMAND [repl writer worker 3] CMD: drop db69.tmp.mrs.coll69_1436465827_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.451-0400 m30998| 2015-07-09T14:17:08.247-0400 I NETWORK [conn449] end connection 127.0.0.1:64026 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31102| 2015-07-09T14:17:08.248-0400 I COMMAND [repl writer worker 12] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_445 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31101| 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31101| 2015-07-09T14:17:08.251-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic3.tmp.mr.coll69_443
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31102| 2015-07-09T14:17:08.251-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465827_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31102| 2015-07-09T14:17:08.255-0400 I COMMAND [repl writer worker 6] CMD: drop db69.tmp.mrs.coll69_1436465827_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31101| 2015-07-09T14:17:08.257-0400 I COMMAND [repl writer worker 8] CMD: drop db69.tmp.mrs.coll69_1436465827_119
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31101| 2015-07-09T14:17:08.261-0400 I COMMAND [repl writer worker 11] CMD: drop map_reduce_merge_nonatomic1.tmp.mr.coll69_444
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.452-0400 m31101| 2015-07-09T14:17:08.265-0400 I COMMAND [repl writer worker 2] CMD: drop map_reduce_merge_nonatomic4.tmp.mr.coll69_445
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.453-0400 m31101| 2015-07-09T14:17:08.267-0400 I COMMAND [repl writer worker 1] CMD: drop db69.tmp.mrs.coll69_1436465827_120
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.453-0400 m31101| 2015-07-09T14:17:08.268-0400 I COMMAND [repl writer worker 13] CMD: drop db69.tmp.mrs.coll69_1436465827_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.453-0400 m30999| 2015-07-09T14:17:08.283-0400 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.453-0400 m30999| 2015-07-09T14:17:08.283-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.453-0400 m30999| 2015-07-09T14:17:08.283-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.283-0400-559ebaa4ca4787b9985d1ed2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828283), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.454-0400 m30999| 2015-07-09T14:17:08.389-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic0 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.454-0400 m31100| 2015-07-09T14:17:08.390-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.454-0400 m31100| 2015-07-09T14:17:08.390-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.454-0400 m30999| 2015-07-09T14:17:08.391-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.391-0400-559ebaa4ca4787b9985d1ed3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828391), what: "dropDatabase", ns: "map_reduce_merge_nonatomic0", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.454-0400 m31101| 2015-07-09T14:17:08.392-0400 I COMMAND [repl writer worker 0] dropDatabase map_reduce_merge_nonatomic0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.454-0400 m31102| 2015-07-09T14:17:08.392-0400 I COMMAND [repl writer worker 3] dropDatabase map_reduce_merge_nonatomic0 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.454-0400 m31102| 2015-07-09T14:17:08.393-0400 I COMMAND [repl writer worker 3] dropDatabase map_reduce_merge_nonatomic0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.455-0400 m31101| 2015-07-09T14:17:08.393-0400 I COMMAND [repl writer worker 0] dropDatabase map_reduce_merge_nonatomic0 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.455-0400 m30999| 2015-07-09T14:17:08.448-0400 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.455-0400 m30999| 2015-07-09T14:17:08.448-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.455-0400 m30999| 2015-07-09T14:17:08.448-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.448-0400-559ebaa4ca4787b9985d1ed4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828448), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.556-0400 m30999| 2015-07-09T14:17:08.555-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic1 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.556-0400 m31100| 2015-07-09T14:17:08.556-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.558-0400 m31100| 2015-07-09T14:17:08.557-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.558-0400 m30999| 2015-07-09T14:17:08.558-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.558-0400-559ebaa4ca4787b9985d1ed5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828558), what: "dropDatabase", ns: "map_reduce_merge_nonatomic1", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.559-0400 m31101| 2015-07-09T14:17:08.558-0400 I COMMAND [repl writer worker 8] dropDatabase map_reduce_merge_nonatomic1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.559-0400 m31102| 2015-07-09T14:17:08.558-0400 I COMMAND [repl writer worker 0] dropDatabase map_reduce_merge_nonatomic1 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.559-0400 m31102| 2015-07-09T14:17:08.559-0400 I COMMAND [repl writer worker 0] dropDatabase map_reduce_merge_nonatomic1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.559-0400 m31101| 2015-07-09T14:17:08.559-0400 I COMMAND [repl writer worker 8] dropDatabase map_reduce_merge_nonatomic1 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.615-0400 m30999| 2015-07-09T14:17:08.614-0400 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.615-0400 m30999| 2015-07-09T14:17:08.614-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.615-0400 m30999| 2015-07-09T14:17:08.615-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.614-0400-559ebaa4ca4787b9985d1ed6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828614), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.723-0400 m30999| 2015-07-09T14:17:08.722-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic2 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.723-0400 m31100| 2015-07-09T14:17:08.723-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.725-0400 m31100| 2015-07-09T14:17:08.725-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.726-0400 m30999| 2015-07-09T14:17:08.725-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.725-0400-559ebaa4ca4787b9985d1ed7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828725), what: "dropDatabase", ns: "map_reduce_merge_nonatomic2", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.726-0400 m31101| 2015-07-09T14:17:08.726-0400 I COMMAND [repl writer worker 6] dropDatabase map_reduce_merge_nonatomic2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.726-0400 m31102| 2015-07-09T14:17:08.726-0400 I COMMAND [repl writer worker 9] dropDatabase map_reduce_merge_nonatomic2 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.727-0400 m31101| 2015-07-09T14:17:08.727-0400 I COMMAND [repl writer worker 6] dropDatabase map_reduce_merge_nonatomic2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.728-0400 m31102| 2015-07-09T14:17:08.727-0400 I COMMAND [repl writer worker 9] dropDatabase map_reduce_merge_nonatomic2 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.782-0400 m30999| 2015-07-09T14:17:08.781-0400 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.782-0400 m30999| 2015-07-09T14:17:08.781-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic3
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.782-0400 m30999| 2015-07-09T14:17:08.781-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.781-0400-559ebaa4ca4787b9985d1ed8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828781), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.889-0400 m30999| 2015-07-09T14:17:08.888-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic3 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.890-0400 m31100| 2015-07-09T14:17:08.889-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.891-0400 m31100| 2015-07-09T14:17:08.890-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.891-0400 m30999| 2015-07-09T14:17:08.891-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.891-0400-559ebaa4ca4787b9985d1ed9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828891), what: "dropDatabase", ns: "map_reduce_merge_nonatomic3", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.892-0400 m31102| 2015-07-09T14:17:08.891-0400 I COMMAND [repl writer worker 2] dropDatabase map_reduce_merge_nonatomic3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.892-0400 m31101| 2015-07-09T14:17:08.892-0400 I COMMAND [repl writer worker 12] dropDatabase map_reduce_merge_nonatomic3 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.893-0400 m31102| 2015-07-09T14:17:08.893-0400 I COMMAND [repl writer worker 2] dropDatabase map_reduce_merge_nonatomic3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.893-0400 m31101| 2015-07-09T14:17:08.893-0400 I COMMAND [repl writer worker 12] dropDatabase map_reduce_merge_nonatomic3 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.955-0400 m30999| 2015-07-09T14:17:08.954-0400 I COMMAND [conn1] DROP DATABASE: map_reduce_merge_nonatomic4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.955-0400 m30999| 2015-07-09T14:17:08.954-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:08.955-0400 m30999| 2015-07-09T14:17:08.954-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:08.954-0400-559ebaa4ca4787b9985d1eda", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465828954), what: "dropDatabase.start", ns: "map_reduce_merge_nonatomic4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.066-0400 m30999| 2015-07-09T14:17:09.066-0400 I SHARDING [conn1] DBConfig::dropDatabase: map_reduce_merge_nonatomic4 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.067-0400 m31100| 2015-07-09T14:17:09.066-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.069-0400 m31100| 2015-07-09T14:17:09.069-0400 I COMMAND [conn160] dropDatabase map_reduce_merge_nonatomic4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.070-0400 m30999| 2015-07-09T14:17:09.069-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:09.069-0400-559ebaa5ca4787b9985d1edb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465829069), what: "dropDatabase", ns: "map_reduce_merge_nonatomic4", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.070-0400 m31101| 2015-07-09T14:17:09.070-0400 I COMMAND [repl writer worker 7] dropDatabase map_reduce_merge_nonatomic4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.070-0400 m31102| 2015-07-09T14:17:09.070-0400 I COMMAND [repl writer worker 4] dropDatabase map_reduce_merge_nonatomic4 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.070-0400 m31101| 2015-07-09T14:17:09.070-0400 I COMMAND [repl writer worker 7] dropDatabase map_reduce_merge_nonatomic4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.071-0400 m31102| 2015-07-09T14:17:09.070-0400 I COMMAND [repl writer worker 4] dropDatabase map_reduce_merge_nonatomic4 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.123-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.123-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.123-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.123-0400 jstests/concurrency/fsm_workloads/map_reduce_merge_nonatomic.js: Workload completed in 9211 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.123-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.124-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.124-0400
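The workload ran with one private output database per FSM thread (map_reduce_merge_nonatomic0 through map_reduce_merge_nonatomic4); the teardown above drops them one at a time through mongos, and each drop then replays on the replica-set secondaries via the repl writer workers. A sketch of that cleanup pass (database names from the log; the loop itself is an assumption about what the workload's teardown does):

    // Hedged sketch of the teardown: one dropDatabase per thread-local DB.
    for (var i = 0; i < 5; ++i) {
        var dbName = "map_reduce_merge_nonatomic" + i;
        assert.commandWorked(db.getSiblingDB(dbName).dropDatabase());
    }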
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.124-0400 m30999| 2015-07-09T14:17:09.123-0400 I COMMAND [conn1] DROP: db69.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.124-0400 m30999| 2015-07-09T14:17:09.124-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:09.124-0400-559ebaa5ca4787b9985d1edc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465829124), what: "dropCollection.start", ns: "db69.coll69", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.182-0400 m30999| 2015-07-09T14:17:09.182-0400 I SHARDING [conn1] distributed lock 'db69.coll69/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa5ca4787b9985d1edd
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.184-0400 m31100| 2015-07-09T14:17:09.184-0400 I COMMAND [conn38] CMD: drop db69.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.187-0400 m31200| 2015-07-09T14:17:09.187-0400 I COMMAND [conn64] CMD: drop db69.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.189-0400 m31201| 2015-07-09T14:17:09.188-0400 I COMMAND [repl writer worker 0] CMD: drop db69.tmp.mrs.coll69_1436465827_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.189-0400 m31102| 2015-07-09T14:17:09.188-0400 I COMMAND [repl writer worker 15] CMD: drop db69.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.189-0400 m31101| 2015-07-09T14:17:09.188-0400 I COMMAND [repl writer worker 15] CMD: drop db69.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.190-0400 m31202| 2015-07-09T14:17:09.188-0400 I COMMAND [repl writer worker 15] CMD: drop db69.tmp.mrs.coll69_1436465827_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.191-0400 m31201| 2015-07-09T14:17:09.191-0400 I COMMAND [repl writer worker 11] CMD: drop db69.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.192-0400 m31202| 2015-07-09T14:17:09.192-0400 I COMMAND [repl writer worker 3] CMD: drop db69.coll69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.243-0400 m31100| 2015-07-09T14:17:09.243-0400 I SHARDING [conn38] remotely refreshing metadata for db69.coll69 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559eba99ca4787b9985d1ece, current metadata version is 2|3||559eba99ca4787b9985d1ece
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.245-0400 m31100| 2015-07-09T14:17:09.245-0400 W SHARDING [conn38] no chunks found when reloading db69.coll69, previous version was 0|0||559eba99ca4787b9985d1ece, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.245-0400 m31100| 2015-07-09T14:17:09.245-0400 I SHARDING [conn38] dropping metadata for db69.coll69 at shard version 2|3||559eba99ca4787b9985d1ece, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.247-0400 m31200| 2015-07-09T14:17:09.246-0400 I SHARDING [conn64] remotely refreshing metadata for db69.coll69 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559eba99ca4787b9985d1ece, current metadata version is 2|5||559eba99ca4787b9985d1ece
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.248-0400 m31200| 2015-07-09T14:17:09.248-0400 W SHARDING [conn64] no chunks found when reloading db69.coll69, previous version was 0|0||559eba99ca4787b9985d1ece, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.249-0400 m31200| 2015-07-09T14:17:09.248-0400 I SHARDING [conn64] dropping metadata for db69.coll69 at shard version 2|5||559eba99ca4787b9985d1ece, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.250-0400 m30999| 2015-07-09T14:17:09.249-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:09.249-0400-559ebaa5ca4787b9985d1ede", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465829249), what: "dropCollection", ns: "db69.coll69", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.305-0400 m30999| 2015-07-09T14:17:09.305-0400 I SHARDING [conn1] distributed lock 'db69.coll69/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.362-0400 m30999| 2015-07-09T14:17:09.362-0400 I COMMAND [conn1] DROP DATABASE: db69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.363-0400 m30999| 2015-07-09T14:17:09.362-0400 I SHARDING [conn1] DBConfig::dropDatabase: db69
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.363-0400 m30999| 2015-07-09T14:17:09.362-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:09.362-0400-559ebaa5ca4787b9985d1edf", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465829362), what: "dropDatabase.start", ns: "db69", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.469-0400 m30999| 2015-07-09T14:17:09.469-0400 I SHARDING [conn1] DBConfig::dropDatabase: db69 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.469-0400 m31100| 2015-07-09T14:17:09.469-0400 I COMMAND [conn160] dropDatabase db69 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.470-0400 m31100| 2015-07-09T14:17:09.469-0400 I COMMAND [conn160] dropDatabase db69 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.470-0400 m30999| 2015-07-09T14:17:09.470-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:09.470-0400-559ebaa5ca4787b9985d1ee0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465829470), what: "dropDatabase", ns: "db69", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.471-0400 m31102| 2015-07-09T14:17:09.471-0400 I COMMAND [repl writer worker 11] dropDatabase db69 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.471-0400 m31101| 2015-07-09T14:17:09.471-0400 I COMMAND [repl writer worker 3] dropDatabase db69 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.471-0400 m31101| 2015-07-09T14:17:09.471-0400 I COMMAND [repl writer worker 3] dropDatabase db69 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.471-0400 m31102| 2015-07-09T14:17:09.471-0400 I COMMAND [repl writer worker 11] dropDatabase db69 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.558-0400 m31100| 2015-07-09T14:17:09.558-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.562-0400 m31101| 2015-07-09T14:17:09.561-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.562-0400 m31102| 2015-07-09T14:17:09.561-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.600-0400 m31200| 2015-07-09T14:17:09.599-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.602-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400 jstests/concurrency/fsm_workloads/update_multifield.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400 m31202| 2015-07-09T14:17:09.603-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.603-0400 m31201| 2015-07-09T14:17:09.603-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.617-0400 m30999| 2015-07-09T14:17:09.616-0400 I SHARDING [conn1] distributed lock 'db70/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa5ca4787b9985d1ee1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.621-0400 m30999| 2015-07-09T14:17:09.621-0400 I SHARDING [conn1] Placing [db70] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.621-0400 m30999| 2015-07-09T14:17:09.621-0400 I SHARDING [conn1] Enabling sharding for database [db70] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.677-0400 m30999| 2015-07-09T14:17:09.677-0400 I SHARDING [conn1] distributed lock 'db70/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.702-0400 m31100| 2015-07-09T14:17:09.701-0400 I INDEX [conn68] build index on: db70.coll70 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.702-0400 m31100| 2015-07-09T14:17:09.701-0400 I INDEX [conn68] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.711-0400 m31100| 2015-07-09T14:17:09.710-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.712-0400 m30999| 2015-07-09T14:17:09.712-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db70.coll70", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.716-0400 m30999| 2015-07-09T14:17:09.716-0400 I SHARDING [conn1] distributed lock 'db70.coll70/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa5ca4787b9985d1ee2
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.718-0400 m30999| 2015-07-09T14:17:09.717-0400 I SHARDING [conn1] enable sharding on: db70.coll70 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.718-0400 m30999| 2015-07-09T14:17:09.718-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:09.717-0400-559ebaa5ca4787b9985d1ee3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465829717), what: "shardCollection.start", ns: "db70.coll70", details: { shardKey: { _id: "hashed" }, collection: "db70.coll70", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.727-0400 m31102| 2015-07-09T14:17:09.727-0400 I INDEX [repl writer worker 5] build index on: db70.coll70 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.728-0400 m31102| 2015-07-09T14:17:09.727-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.728-0400 m31101| 2015-07-09T14:17:09.727-0400 I INDEX [repl writer worker 10] build index on: db70.coll70 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.728-0400 m31101| 2015-07-09T14:17:09.728-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.732-0400 m31102| 2015-07-09T14:17:09.731-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.736-0400 m31101| 2015-07-09T14:17:09.736-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.772-0400 m30999| 2015-07-09T14:17:09.771-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db70.coll70 using new epoch 559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.881-0400 m30999| 2015-07-09T14:17:09.880-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db70.coll70: 0ms sequenceNumber: 302 version: 1|1||559ebaa5ca4787b9985d1ee4 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.937-0400 m30999| 2015-07-09T14:17:09.936-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db70.coll70: 0ms sequenceNumber: 303 version: 1|1||559ebaa5ca4787b9985d1ee4 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.939-0400 m31100| 2015-07-09T14:17:09.939-0400 I SHARDING [conn191] remotely refreshing metadata for db70.coll70 with requested shard version 1|1||559ebaa5ca4787b9985d1ee4, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.941-0400 m31100| 2015-07-09T14:17:09.940-0400 I SHARDING [conn191] collection db70.coll70 was previously unsharded, new metadata loaded with shard version 1|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.941-0400 m31100| 2015-07-09T14:17:09.940-0400 I SHARDING [conn191] collection version was loaded at version 1|1||559ebaa5ca4787b9985d1ee4, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.941-0400 m30999| 2015-07-09T14:17:09.941-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:09.941-0400-559ebaa5ca4787b9985d1ee5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465829941), what: "shardCollection", ns: "db70.coll70", details: { version: "1|1||559ebaa5ca4787b9985d1ee4" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.995-0400 m30999| 2015-07-09T14:17:09.995-0400 I SHARDING [conn1] distributed lock 'db70.coll70/bs-osx108-8:30999:1436464534:16807' unlocked.
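The next workload's setup follows the standard fixture pattern: db70 is placed on test-rs0, sharding is enabled, and db70.coll70 is sharded on a hashed _id key, which is why the shardCollection path above pre-creates two chunks (numChunks: 2) split at hash value 0. A sketch of the equivalent shell calls, using the names from the log:

    // Hedged sketch: shard db70.coll70 on a hashed _id key via mongos.
    assert.commandWorked(db.adminCommand({enableSharding: "db70"}));
    assert.commandWorked(db.adminCommand({shardCollection: "db70.coll70",
                                          key: {_id: "hashed"}}));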
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.996-0400 m30999| 2015-07-09T14:17:09.996-0400 I SHARDING [conn1] moving chunk ns: db70.coll70 moving ( ns: db70.coll70, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.997-0400 m31100| 2015-07-09T14:17:09.996-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:09.998-0400 m31100| 2015-07-09T14:17:09.997-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebaa5ca4787b9985d1ee4') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.003-0400 m31100| 2015-07-09T14:17:10.003-0400 I SHARDING [conn38] distributed lock 'db70.coll70/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa5792e00bb67274ab9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.003-0400 m31100| 2015-07-09T14:17:10.003-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:10.003-0400-559ebaa6792e00bb67274aba", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465830003), what: "moveChunk.start", ns: "db70.coll70", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.057-0400 m31100| 2015-07-09T14:17:10.056-0400 I SHARDING [conn38] remotely refreshing metadata for db70.coll70 based on current shard version 1|1||559ebaa5ca4787b9985d1ee4, current metadata version is 1|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.059-0400 m31100| 2015-07-09T14:17:10.058-0400 I SHARDING [conn38] metadata of collection db70.coll70 already up to date (shard version : 1|1||559ebaa5ca4787b9985d1ee4, took 2ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.059-0400 m31100| 2015-07-09T14:17:10.058-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.059-0400 m31100| 2015-07-09T14:17:10.059-0400 I SHARDING [conn38] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.060-0400 m31200| 2015-07-09T14:17:10.059-0400 I SHARDING [conn16] remotely refreshing metadata for db70.coll70, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.062-0400 m31200| 2015-07-09T14:17:10.061-0400 I SHARDING [conn16] collection db70.coll70 was previously unsharded, new metadata loaded with shard version 0|0||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.062-0400 m31200| 2015-07-09T14:17:10.061-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebaa5ca4787b9985d1ee4, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.062-0400 m31200| 2015-07-09T14:17:10.062-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db70.coll70 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.065-0400 m31100| 2015-07-09T14:17:10.064-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.069-0400 m31100| 2015-07-09T14:17:10.068-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.074-0400 m31100| 2015-07-09T14:17:10.073-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.078-0400 m31200| 2015-07-09T14:17:10.077-0400 I INDEX [migrateThread] build index on: db70.coll70 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.078-0400 m31200| 2015-07-09T14:17:10.078-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.084-0400 m31100| 2015-07-09T14:17:10.083-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.089-0400 m31200| 2015-07-09T14:17:10.089-0400 I INDEX [migrateThread] build index on: db70.coll70 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.089-0400 m31200| 2015-07-09T14:17:10.089-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.099-0400 m31200| 2015-07-09T14:17:10.098-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.100-0400 m31200| 2015-07-09T14:17:10.099-0400 I SHARDING [migrateThread] Deleter starting delete for: db70.coll70 from { _id: 0 } -> { _id: MaxKey }, with opId: 97590
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.101-0400 m31200| 2015-07-09T14:17:10.100-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db70.coll70 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.102-0400 m31100| 2015-07-09T14:17:10.101-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.112-0400 m31202| 2015-07-09T14:17:10.111-0400 I INDEX [repl writer worker 8] build index on: db70.coll70 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.112-0400 m31201| 2015-07-09T14:17:10.111-0400 I INDEX [repl writer worker 13] build index on: db70.coll70 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.112-0400 m31202| 2015-07-09T14:17:10.111-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.112-0400 m31201| 2015-07-09T14:17:10.111-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.116-0400 m31201| 2015-07-09T14:17:10.116-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.118-0400 m31202| 2015-07-09T14:17:10.117-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.119-0400 m31200| 2015-07-09T14:17:10.118-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.119-0400 m31200| 2015-07-09T14:17:10.118-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db70.coll70' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.136-0400 m31100| 2015-07-09T14:17:10.136-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.136-0400 m31100| 2015-07-09T14:17:10.136-0400 I SHARDING [conn38] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.137-0400 m31100| 2015-07-09T14:17:10.137-0400 I SHARDING [conn38] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.137-0400 m31100| 2015-07-09T14:17:10.137-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.142-0400 m31200| 2015-07-09T14:17:10.142-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db70.coll70' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.143-0400 m31200| 2015-07-09T14:17:10.142-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:10.142-0400-559ebaa6d5a107a5b9c0db70", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465830142), what: "moveChunk.to", ns: "db70.coll70", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 37, step 2 of 5: 18, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 24, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.197-0400 m31100| 2015-07-09T14:17:10.197-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.198-0400 m31100| 2015-07-09T14:17:10.197-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebaa5ca4787b9985d1ee4 through { _id: MinKey } -> { _id: 0 } for collection 'db70.coll70'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.199-0400 m31100| 2015-07-09T14:17:10.198-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:10.198-0400-559ebaa6792e00bb67274abb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465830198), what: "moveChunk.commit", ns: "db70.coll70", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.252-0400 m31100| 2015-07-09T14:17:10.251-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section
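The sequence above is one complete chunk migration: the donor (test-rs0) logs moveChunk.start, the recipient (test-rs1) clones documents and builds the indexes on its migrateThread, replication catches up, the donor enters the critical section and bumps the shard version to 2|0, and both sides record success. The balancer normally drives this, but the same migration can be requested by hand through mongos; a sketch using the bounds from the log, where _waitForDelete mirrors the "waiting for full cleanup after move" entry:

    // Hedged sketch of requesting the logged migration manually via mongos.
    assert.commandWorked(db.adminCommand({
        moveChunk: "db70.coll70",
        find: {_id: 0},            // any point inside the { _id: 0 } -> MaxKey chunk
        to: "test-rs1",
        _waitForDelete: true
    }));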
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.252-0400 m31100| 2015-07-09T14:17:10.251-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.252-0400 m31100| 2015-07-09T14:17:10.252-0400 I SHARDING [conn38] Deleter starting delete for: db70.coll70 from { _id: 0 } -> { _id: MaxKey }, with opId: 232699
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.252-0400 m31100| 2015-07-09T14:17:10.252-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db70.coll70 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.252-0400 m31100| 2015-07-09T14:17:10.252-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.253-0400 m31100| 2015-07-09T14:17:10.253-0400 I SHARDING [conn38] distributed lock 'db70.coll70/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.254-0400 m31100| 2015-07-09T14:17:10.253-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:10.253-0400-559ebaa6792e00bb67274abc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465830253), what: "moveChunk.from", ns: "db70.coll70", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 61, step 3 of 6: 3, step 4 of 6: 73, step 5 of 6: 115, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.307-0400 m31100| 2015-07-09T14:17:10.306-0400 I COMMAND [conn38] command db70.coll70 command: moveChunk { moveChunk: "db70.coll70", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebaa5ca4787b9985d1ee4') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 310ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.309-0400 m30999| 2015-07-09T14:17:10.309-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db70.coll70: 0ms sequenceNumber: 304 version: 2|1||559ebaa5ca4787b9985d1ee4 based on: 1|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.311-0400 m31100| 2015-07-09T14:17:10.310-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db70.coll70", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa5ca4787b9985d1ee4') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.316-0400 m31100| 2015-07-09T14:17:10.315-0400 I SHARDING [conn38] distributed lock 'db70.coll70/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa6792e00bb67274abd
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.316-0400 m31100| 2015-07-09T14:17:10.315-0400 I SHARDING [conn38] remotely refreshing metadata for db70.coll70 based on current shard version 2|0||559ebaa5ca4787b9985d1ee4, current metadata version is 2|0||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.317-0400 m31100| 2015-07-09T14:17:10.316-0400 I SHARDING [conn38] updating metadata for db70.coll70 from shard version 2|0||559ebaa5ca4787b9985d1ee4 to shard version 2|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.317-0400 m31100| 2015-07-09T14:17:10.316-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559ebaa5ca4787b9985d1ee4, took 0ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.317-0400 m31100| 2015-07-09T14:17:10.316-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.318-0400 m31100| 2015-07-09T14:17:10.317-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:10.317-0400-559ebaa6792e00bb67274abe", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465830317), what: "split", ns: "db70.coll70", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebaa5ca4787b9985d1ee4') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebaa5ca4787b9985d1ee4') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.372-0400 m31100| 2015-07-09T14:17:10.371-0400 I SHARDING [conn38] distributed lock 'db70.coll70/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.374-0400 m30999| 2015-07-09T14:17:10.374-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db70.coll70: 0ms sequenceNumber: 305 version: 2|3||559ebaa5ca4787b9985d1ee4 based on: 2|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.375-0400 m31200| 2015-07-09T14:17:10.374-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db70.coll70", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa5ca4787b9985d1ee4') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.379-0400 m31200| 2015-07-09T14:17:10.379-0400 I SHARDING [conn64] distributed lock 'db70.coll70/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebaa6d5a107a5b9c0db71
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.379-0400 m31200| 2015-07-09T14:17:10.379-0400 I SHARDING [conn64] remotely refreshing metadata for db70.coll70 based on current shard version 0|0||559ebaa5ca4787b9985d1ee4, current metadata version is 1|1||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.380-0400 m31200| 2015-07-09T14:17:10.380-0400 I SHARDING [conn64] updating metadata for db70.coll70 from shard version 0|0||559ebaa5ca4787b9985d1ee4 to shard version 2|0||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.381-0400 m31200| 2015-07-09T14:17:10.380-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559ebaa5ca4787b9985d1ee4, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.381-0400 m31200| 2015-07-09T14:17:10.380-0400 I SHARDING [conn64] splitChunk accepted at version 2|0||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.383-0400 m31200| 2015-07-09T14:17:10.382-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:10.382-0400-559ebaa6d5a107a5b9c0db72", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436465830382), what: "split", ns: "db70.coll70", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559ebaa5ca4787b9985d1ee4') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebaa5ca4787b9985d1ee4') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.437-0400 m31200| 2015-07-09T14:17:10.437-0400 I SHARDING [conn64] distributed lock 'db70.coll70/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.439-0400 m30999| 2015-07-09T14:17:10.439-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db70.coll70: 0ms sequenceNumber: 306 version: 2|5||559ebaa5ca4787b9985d1ee4 based on: 2|3||559ebaa5ca4787b9985d1ee4
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.448-0400 m31100| 2015-07-09T14:17:10.448-0400 I INDEX [conn191] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.448-0400 m31100| 2015-07-09T14:17:10.448-0400 I INDEX [conn191] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.449-0400 m31200| 2015-07-09T14:17:10.448-0400 I INDEX [conn32] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.449-0400 m31200| 2015-07-09T14:17:10.448-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.456-0400 m31200| 2015-07-09T14:17:10.456-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs
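After the migration, each shard splits its remaining chunk at the midpoint of its half of the hashed-key range (the split keys ±4611686018427387902, roughly ±2^62, from the log), leaving two chunks per shard at collection version 2|5. The same split can be expressed with the split command through mongos; a sketch using the left-hand split point from the log:

    // Hedged sketch: split the MinKey -> 0 chunk at the hashed-key value logged above.
    assert.commandWorked(db.adminCommand({
        split: "db70.coll70",
        middle: {_id: NumberLong("-4611686018427387902")}
    }));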
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.457-0400 m31100| 2015-07-09T14:17:10.457-0400 I INDEX [conn191] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.472-0400 m31102| 2015-07-09T14:17:10.472-0400 I INDEX [repl writer worker 13] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.473-0400 m31102| 2015-07-09T14:17:10.472-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.476-0400 m31201| 2015-07-09T14:17:10.475-0400 I INDEX [repl writer worker 7] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.476-0400 m31201| 2015-07-09T14:17:10.475-0400 I INDEX [repl writer worker 7] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.481-0400 m31200| 2015-07-09T14:17:10.480-0400 I INDEX [conn32] build index on: db70.coll70 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.481-0400 m31200| 2015-07-09T14:17:10.480-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.481-0400 m31101| 2015-07-09T14:17:10.480-0400 I INDEX [repl writer worker 4] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.482-0400 m31101| 2015-07-09T14:17:10.480-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.483-0400 m31202| 2015-07-09T14:17:10.482-0400 I INDEX [repl writer worker 6] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.483-0400 m31202| 2015-07-09T14:17:10.482-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.484-0400 m31102| 2015-07-09T14:17:10.483-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.486-0400 m31100| 2015-07-09T14:17:10.486-0400 I INDEX [conn191] build index on: db70.coll70 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.486-0400 m31201| 2015-07-09T14:17:10.485-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.486-0400 m31100| 2015-07-09T14:17:10.486-0400 I INDEX [conn191] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.493-0400 m31101| 2015-07-09T14:17:10.493-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.494-0400 m31200| 2015-07-09T14:17:10.494-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.494-0400 m31100| 2015-07-09T14:17:10.494-0400 I INDEX [conn191] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.509-0400 m31202| 2015-07-09T14:17:10.508-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.509-0400 m31201| 2015-07-09T14:17:10.508-0400 I INDEX [repl writer worker 4] build index on: db70.coll70 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.509-0400 m31201| 2015-07-09T14:17:10.508-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.509-0400 m31200| 2015-07-09T14:17:10.509-0400 I INDEX [conn32] build index on: db70.coll70 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.509-0400 m31200| 2015-07-09T14:17:10.509-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.511-0400 m31100| 2015-07-09T14:17:10.510-0400 I INDEX [conn191] build index on: db70.coll70 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.511-0400 m31100| 2015-07-09T14:17:10.510-0400 I INDEX [conn191] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.520-0400 m31101| 2015-07-09T14:17:10.519-0400 I INDEX [repl writer worker 2] build index on: db70.coll70 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.520-0400 m31101| 2015-07-09T14:17:10.519-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.521-0400 m31102| 2015-07-09T14:17:10.519-0400 I INDEX [repl writer worker 12] build index on: db70.coll70 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.521-0400 m31102| 2015-07-09T14:17:10.519-0400 I INDEX [repl writer worker 12] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.525-0400 m31201| 2015-07-09T14:17:10.524-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.528-0400 m31200| 2015-07-09T14:17:10.528-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.531-0400 m31202| 2015-07-09T14:17:10.530-0400 I INDEX [repl writer worker 1] build index on: db70.coll70 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.531-0400 m31202| 2015-07-09T14:17:10.530-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.534-0400 m31100| 2015-07-09T14:17:10.533-0400 I INDEX [conn191] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.537-0400 m31101| 2015-07-09T14:17:10.537-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.541-0400 m31102| 2015-07-09T14:17:10.541-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.542-0400 m31201| 2015-07-09T14:17:10.541-0400 I INDEX [repl writer worker 14] build index on: db70.coll70 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.543-0400 m31201| 2015-07-09T14:17:10.541-0400 I INDEX [repl writer worker 14] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.546-0400 m31202| 2015-07-09T14:17:10.546-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.546-0400 m31101| 2015-07-09T14:17:10.546-0400 I INDEX [repl writer worker 1] build index on: db70.coll70 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.547-0400 m31101| 2015-07-09T14:17:10.546-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.558-0400 m31201| 2015-07-09T14:17:10.557-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.559-0400 m31200| 2015-07-09T14:17:10.558-0400 I INDEX [conn32] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.559-0400 m31200| 2015-07-09T14:17:10.558-0400 I INDEX [conn32] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.567-0400 m31102| 2015-07-09T14:17:10.565-0400 I INDEX [repl writer worker 8] build index on: db70.coll70 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.567-0400 m31102| 2015-07-09T14:17:10.565-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.567-0400 m31202| 2015-07-09T14:17:10.565-0400 I INDEX [repl writer worker 5] build index on: db70.coll70 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.568-0400 m31202| 2015-07-09T14:17:10.565-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.568-0400 m31100| 2015-07-09T14:17:10.566-0400 I INDEX [conn191] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.568-0400 m31100| 2015-07-09T14:17:10.566-0400 I INDEX [conn191] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.569-0400 m31101| 2015-07-09T14:17:10.569-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.578-0400 m31200| 2015-07-09T14:17:10.577-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.585-0400 m31100| 2015-07-09T14:17:10.585-0400 I INDEX [conn191] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.585-0400 m31102| 2015-07-09T14:17:10.585-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.588-0400 m31202| 2015-07-09T14:17:10.588-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.590-0400 m31201| 2015-07-09T14:17:10.590-0400 I INDEX [repl writer worker 8] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.590-0400 m31201| 2015-07-09T14:17:10.590-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.596-0400 m31101| 2015-07-09T14:17:10.596-0400 I INDEX [repl writer worker 13] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.596-0400 m31101| 2015-07-09T14:17:10.596-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.601-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.620-0400 m31201| 2015-07-09T14:17:10.603-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.666-0400 m31102| 2015-07-09T14:17:10.656-0400 I INDEX [repl writer worker 6] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.666-0400 m31102| 2015-07-09T14:17:10.656-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.666-0400 m31202| 2015-07-09T14:17:10.655-0400 I INDEX [repl writer worker 13] build index on: db70.coll70 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db70.coll70" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.667-0400 m31202| 2015-07-09T14:17:10.655-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.693-0400 m31101| 2015-07-09T14:17:10.669-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.693-0400 m30999| 2015-07-09T14:17:10.686-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64035 #452 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.718-0400 m31202| 2015-07-09T14:17:10.718-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
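The update_multifield workload's setup builds single-field indexes on x, y, and z plus the compound index x_1_y_1_z_1; each build runs on the shard primaries (the conn* entries) and is then replayed by the repl writer workers on every secondary, which is why the same build/done pairs appear six times. A sketch of the setup calls implied by those index names (collection name from the log; the exact setup code is an assumption):

    // Hedged sketch: the index set implied by the build entries above.
    var coll = db.getSiblingDB("db70").coll70;
    assert.commandWorked(coll.createIndex({x: 1}));
    assert.commandWorked(coll.createIndex({y: 1}));
    assert.commandWorked(coll.createIndex({z: 1}));
    assert.commandWorked(coll.createIndex({x: 1, y: 1, z: 1}));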
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.721-0400 m30998| 2015-07-09T14:17:10.721-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64036 #452 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.721-0400 m30998| 2015-07-09T14:17:10.721-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64037 #453 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.727-0400 m30998| 2015-07-09T14:17:10.727-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64041 #454 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.727-0400 m30999| 2015-07-09T14:17:10.726-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64038 #453 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.728-0400 m30999| 2015-07-09T14:17:10.727-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64039 #454 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.728-0400 m30999| 2015-07-09T14:17:10.727-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64040 #455 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.728-0400 m30999| 2015-07-09T14:17:10.728-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64042 #456 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.731-0400 m30998| 2015-07-09T14:17:10.729-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64043 #455 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.731-0400 m30998| 2015-07-09T14:17:10.731-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64044 #456 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.741-0400 setting random seed: 5790575067512 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.742-0400 setting random seed: 3026159005239 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.742-0400 setting random seed: 2195205003954 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.742-0400 setting random seed: 3048235750757 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.743-0400 setting random seed: 654424745589 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.743-0400 setting random seed: 3106788275763 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.745-0400 m30998| 2015-07-09T14:17:10.744-0400 I SHARDING [conn452] ChunkManager: time to load chunks for db70.coll70: 0ms sequenceNumber: 86 version: 2|5||559ebaa5ca4787b9985d1ee4 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.745-0400 setting random seed: 1698286831378 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.746-0400 setting random seed: 8916704240255 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.746-0400 setting random seed: 4810518641024 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.756-0400 setting random seed: 2002322017215 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.838-0400 m30998| 2015-07-09T14:17:10.837-0400 I NETWORK [conn452] end connection 127.0.0.1:64036 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.850-0400 m30998| 2015-07-09T14:17:10.849-0400 I NETWORK [conn454] end connection 127.0.0.1:64041 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.862-0400 m30999| 2015-07-09T14:17:10.862-0400 I NETWORK [conn456] end 
connection 127.0.0.1:64042 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.862-0400 m30999| 2015-07-09T14:17:10.862-0400 I NETWORK [conn453] end connection 127.0.0.1:64038 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.874-0400 m30998| 2015-07-09T14:17:10.873-0400 I NETWORK [conn455] end connection 127.0.0.1:64043 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.881-0400 m30998| 2015-07-09T14:17:10.881-0400 I NETWORK [conn453] end connection 127.0.0.1:64037 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.891-0400 m30998| 2015-07-09T14:17:10.891-0400 I NETWORK [conn456] end connection 127.0.0.1:64044 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.901-0400 m30999| 2015-07-09T14:17:10.901-0400 I NETWORK [conn455] end connection 127.0.0.1:64040 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.930-0400 m30999| 2015-07-09T14:17:10.928-0400 I NETWORK [conn452] end connection 127.0.0.1:64035 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.930-0400 m30999| 2015-07-09T14:17:10.928-0400 I NETWORK [conn454] end connection 127.0.0.1:64039 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.951-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.951-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.951-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.951-0400 jstests/concurrency/fsm_workloads/update_multifield.js: Workload completed in 350 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.952-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.952-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.952-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.952-0400 m30999| 2015-07-09T14:17:10.952-0400 I COMMAND [conn1] DROP: db70.coll70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:10.952-0400 m30999| 2015-07-09T14:17:10.952-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:10.952-0400-559ebaa6ca4787b9985d1ee6", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465830952), what: "dropCollection.start", ns: "db70.coll70", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.010-0400 m30999| 2015-07-09T14:17:11.010-0400 I SHARDING [conn1] distributed lock 'db70.coll70/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa7ca4787b9985d1ee7 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.012-0400 m31100| 2015-07-09T14:17:11.011-0400 I COMMAND [conn38] CMD: drop db70.coll70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.016-0400 m31102| 2015-07-09T14:17:11.016-0400 I COMMAND [repl writer worker 3] CMD: drop db70.coll70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.016-0400 m31101| 2015-07-09T14:17:11.015-0400 I COMMAND [repl writer worker 0] CMD: drop db70.coll70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.017-0400 m31200| 2015-07-09T14:17:11.015-0400 I COMMAND [conn64] CMD: drop db70.coll70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.022-0400 m31201| 2015-07-09T14:17:11.021-0400 I COMMAND [repl writer worker 14] CMD: drop db70.coll70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.022-0400 m31202| 2015-07-09T14:17:11.022-0400 I COMMAND [repl writer worker 15] CMD: drop db70.coll70 
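note: the update_multifield.js workload has finished and the harness is tearing down. conn1 on the m30999 mongos drops db70.coll70 under a distributed lock, the shard primaries (m31100, m31200) execute the drop, and the secondaries replay it via their repl writer workers. A minimal sketch of that teardown step, assuming a mongos reachable at localhost:30999 as in this run (the actual logic lives in the concurrency harness under jstests/concurrency/fsm_libs/):

    // Sketch only: drop the workload collection through mongos.
    var mongos = new Mongo("localhost:30999");
    mongos.getDB("db70").coll70.drop();      // coordinated drop across both shards
    mongos.getDB("db70").dropDatabase();     // followed by the database drop, seen below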
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.075-0400 m31100| 2015-07-09T14:17:11.075-0400 I SHARDING [conn38] remotely refreshing metadata for db70.coll70 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebaa5ca4787b9985d1ee4, current metadata version is 2|3||559ebaa5ca4787b9985d1ee4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.077-0400 m31100| 2015-07-09T14:17:11.077-0400 W SHARDING [conn38] no chunks found when reloading db70.coll70, previous version was 0|0||559ebaa5ca4787b9985d1ee4, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.077-0400 m31100| 2015-07-09T14:17:11.077-0400 I SHARDING [conn38] dropping metadata for db70.coll70 at shard version 2|3||559ebaa5ca4787b9985d1ee4, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.079-0400 m31200| 2015-07-09T14:17:11.078-0400 I SHARDING [conn64] remotely refreshing metadata for db70.coll70 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebaa5ca4787b9985d1ee4, current metadata version is 2|5||559ebaa5ca4787b9985d1ee4 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.081-0400 m31200| 2015-07-09T14:17:11.080-0400 W SHARDING [conn64] no chunks found when reloading db70.coll70, previous version was 0|0||559ebaa5ca4787b9985d1ee4, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.081-0400 m31200| 2015-07-09T14:17:11.080-0400 I SHARDING [conn64] dropping metadata for db70.coll70 at shard version 2|5||559ebaa5ca4787b9985d1ee4, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.082-0400 m30999| 2015-07-09T14:17:11.081-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:11.081-0400-559ebaa7ca4787b9985d1ee8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465831081), what: "dropCollection", ns: "db70.coll70", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.137-0400 m30999| 2015-07-09T14:17:11.136-0400 I SHARDING [conn1] distributed lock 'db70.coll70/bs-osx108-8:30999:1436464534:16807' unlocked. 
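note: the "about to log metadata event" lines above correspond to documents written to the config servers' config.changelog collection, with the same what/ns/time fields shown in the log. A hedged way to confirm the drop from a shell attached to this cluster (again assuming the mongos at localhost:30999):

    // Sketch: read back the dropCollection events just logged for db70.coll70.
    var configDB = new Mongo("localhost:30999").getDB("config");
    configDB.changelog
            .find({ ns: "db70.coll70",
                    what: { $in: ["dropCollection.start", "dropCollection"] } })
            .sort({ time: 1 })
            .forEach(printjson);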
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.194-0400 m30999| 2015-07-09T14:17:11.193-0400 I COMMAND [conn1] DROP DATABASE: db70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.194-0400 m30999| 2015-07-09T14:17:11.193-0400 I SHARDING [conn1] DBConfig::dropDatabase: db70 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.194-0400 m30999| 2015-07-09T14:17:11.193-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:11.193-0400-559ebaa7ca4787b9985d1ee9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465831193), what: "dropDatabase.start", ns: "db70", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.302-0400 m30999| 2015-07-09T14:17:11.301-0400 I SHARDING [conn1] DBConfig::dropDatabase: db70 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.302-0400 m31100| 2015-07-09T14:17:11.302-0400 I COMMAND [conn160] dropDatabase db70 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.302-0400 m31100| 2015-07-09T14:17:11.302-0400 I COMMAND [conn160] dropDatabase db70 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.303-0400 m30999| 2015-07-09T14:17:11.303-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:11.302-0400-559ebaa7ca4787b9985d1eea", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465831303), what: "dropDatabase", ns: "db70", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.303-0400 m31101| 2015-07-09T14:17:11.303-0400 I COMMAND [repl writer worker 8] dropDatabase db70 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.303-0400 m31101| 2015-07-09T14:17:11.303-0400 I COMMAND [repl writer worker 8] dropDatabase db70 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.304-0400 m31102| 2015-07-09T14:17:11.303-0400 I COMMAND [repl writer worker 0] dropDatabase db70 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.304-0400 m31102| 2015-07-09T14:17:11.303-0400 I COMMAND [repl writer worker 0] dropDatabase db70 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.392-0400 m31100| 2015-07-09T14:17:11.392-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.396-0400 m31101| 2015-07-09T14:17:11.396-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.397-0400 m31102| 2015-07-09T14:17:11.396-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.434-0400 m31200| 2015-07-09T14:17:11.433-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.437-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.437-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.437-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.437-0400 jstests/concurrency/fsm_workloads/explain_find.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.437-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.438-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.438-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.439-0400 m31202| 2015-07-09T14:17:11.437-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.439-0400 
m31201| 2015-07-09T14:17:11.438-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.445-0400 m30999| 2015-07-09T14:17:11.444-0400 I SHARDING [conn1] distributed lock 'db71/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa7ca4787b9985d1eeb [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.449-0400 m30999| 2015-07-09T14:17:11.448-0400 I SHARDING [conn1] Placing [db71] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.449-0400 m30999| 2015-07-09T14:17:11.448-0400 I SHARDING [conn1] Enabling sharding for database [db71] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.504-0400 m30999| 2015-07-09T14:17:11.504-0400 I SHARDING [conn1] distributed lock 'db71/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.527-0400 m31100| 2015-07-09T14:17:11.526-0400 I INDEX [conn68] build index on: db71.coll71 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db71.coll71" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.527-0400 m31100| 2015-07-09T14:17:11.526-0400 I INDEX [conn68] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.544-0400 m31100| 2015-07-09T14:17:11.543-0400 I INDEX [conn68] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.545-0400 m30999| 2015-07-09T14:17:11.545-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db71.coll71", key: { j: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.550-0400 m30999| 2015-07-09T14:17:11.549-0400 I SHARDING [conn1] distributed lock 'db71.coll71/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa7ca4787b9985d1eec [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.551-0400 m31101| 2015-07-09T14:17:11.551-0400 I INDEX [repl writer worker 3] build index on: db71.coll71 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db71.coll71" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.551-0400 m31101| 2015-07-09T14:17:11.551-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.551-0400 m30999| 2015-07-09T14:17:11.551-0400 I SHARDING [conn1] enable sharding on: db71.coll71 with shard key: { j: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.552-0400 m30999| 2015-07-09T14:17:11.551-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:11.551-0400-559ebaa7ca4787b9985d1eed", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465831551), what: "shardCollection.start", ns: "db71.coll71", details: { shardKey: { j: 1.0 }, collection: "db71.coll71", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.555-0400 m31102| 2015-07-09T14:17:11.555-0400 I INDEX [repl writer worker 11] build index on: db71.coll71 properties: { v: 1, key: { j: 1.0 }, name: "j_1", ns: "db71.coll71" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.555-0400 m31102| 2015-07-09T14:17:11.555-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.562-0400 m31101| 2015-07-09T14:17:11.561-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.564-0400 m31102| 2015-07-09T14:17:11.564-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.605-0400 m30999| 2015-07-09T14:17:11.605-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db71.coll71 using new epoch 559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.660-0400 m30999| 2015-07-09T14:17:11.659-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db71.coll71: 0ms sequenceNumber: 307 version: 1|0||559ebaa7ca4787b9985d1eee based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.716-0400 m30999| 2015-07-09T14:17:11.715-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db71.coll71: 0ms sequenceNumber: 308 version: 1|0||559ebaa7ca4787b9985d1eee based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.717-0400 m31100| 2015-07-09T14:17:11.717-0400 I SHARDING [conn191] remotely refreshing metadata for db71.coll71 with requested shard version 1|0||559ebaa7ca4787b9985d1eee, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.719-0400 m31100| 2015-07-09T14:17:11.718-0400 I SHARDING [conn191] collection db71.coll71 was previously unsharded, new metadata loaded with shard version 1|0||559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.719-0400 m31100| 2015-07-09T14:17:11.718-0400 I SHARDING [conn191] collection version was loaded at version 1|0||559ebaa7ca4787b9985d1eee, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.719-0400 m30999| 2015-07-09T14:17:11.719-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:11.719-0400-559ebaa7ca4787b9985d1eef", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465831719), what: "shardCollection", ns: "db71.coll71", details: { version: "1|0||559ebaa7ca4787b9985d1eee" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.774-0400 m30999| 2015-07-09T14:17:11.773-0400 I SHARDING [conn1] distributed lock 'db71.coll71/bs-osx108-8:30999:1436464534:16807' unlocked. 
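note: the explain_find.js setup is sharding db71.coll71 on { j: 1 }: db71 is placed on test-rs0, the supporting j_1 index is built on the primary and replicated to m31101/m31102, and shardcollection creates a single initial chunk under epoch 559ebaa7ca4787b9985d1eee. Roughly equivalent shell commands (a sketch; the workload's own setup code may differ):

    // Sketch of the sharding setup performed above, via the mongos at 30999.
    var mongos = new Mongo("localhost:30999");
    assert.commandWorked(mongos.adminCommand({ enableSharding: "db71" }));
    mongos.getDB("db71").coll71.ensureIndex({ j: 1 });   // shard-key index first
    assert.commandWorked(
        mongos.adminCommand({ shardCollection: "db71.coll71", key: { j: 1 } }));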
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.774-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.898-0400 m30999| 2015-07-09T14:17:11.898-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64045 #457 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.900-0400 m30999| 2015-07-09T14:17:11.899-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64046 #458 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.900-0400 m30998| 2015-07-09T14:17:11.899-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64047 #457 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.900-0400 m30999| 2015-07-09T14:17:11.900-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64048 #459 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.909-0400 m30998| 2015-07-09T14:17:11.909-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64049 #458 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.912-0400 m30999| 2015-07-09T14:17:11.912-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64050 #460 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.915-0400 m30998| 2015-07-09T14:17:11.915-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64051 #459 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.917-0400 m30999| 2015-07-09T14:17:11.917-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64052 #461 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.932-0400 m30998| 2015-07-09T14:17:11.932-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64053 #460 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.936-0400 m30998| 2015-07-09T14:17:11.936-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64054 #461 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.948-0400 setting random seed: 4974749139510 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.948-0400 setting random seed: 1575394873507 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.948-0400 setting random seed: 386837352998 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.948-0400 setting random seed: 7719103628769 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.949-0400 setting random seed: 7690856740809 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.949-0400 setting random seed: 3895265012979 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.950-0400 setting random seed: 229302984662 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.951-0400 setting random seed: 7661651363596 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.953-0400 setting random seed: 9374527921900 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.954-0400 setting random seed: 608826680108 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:11.961-0400 m30998| 2015-07-09T14:17:11.961-0400 I SHARDING [conn461] ChunkManager: time to load chunks for db71.coll71: 0ms sequenceNumber: 87 version: 1|0||559ebaa7ca4787b9985d1eee based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.015-0400 m31100| 2015-07-09T14:17:12.014-0400 I SHARDING [conn187] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : 
MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.015-0400 m31100| 2015-07-09T14:17:12.014-0400 I SHARDING [conn38] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.015-0400 m31100| 2015-07-09T14:17:12.015-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.016-0400 m31100| 2015-07-09T14:17:12.016-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.017-0400 m31100| 2015-07-09T14:17:12.017-0400 I SHARDING [conn187] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.017-0400 m31100| 2015-07-09T14:17:12.017-0400 I SHARDING [conn187] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.018-0400 m31100| 2015-07-09T14:17:12.017-0400 W SHARDING [conn187] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.019-0400 m30999| 2015-07-09T14:17:12.017-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.019-0400 m31100| 2015-07-09T14:17:12.017-0400 I SHARDING [conn15] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.019-0400 m31100| 2015-07-09T14:17:12.018-0400 I SHARDING [conn38] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ac0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.019-0400 m31100| 2015-07-09T14:17:12.018-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.019-0400 m31100| 2015-07-09T14:17:12.018-0400 I SHARDING [conn38] remotely refreshing metadata for db71.coll71 based on current shard version 1|0||559ebaa7ca4787b9985d1eee, current metadata version is 1|0||559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.020-0400 m31100| 
2015-07-09T14:17:12.019-0400 I SHARDING [conn38] metadata of collection db71.coll71 already up to date (shard version : 1|0||559ebaa7ca4787b9985d1eee, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.020-0400 m31100| 2015-07-09T14:17:12.019-0400 I SHARDING [conn38] splitChunk accepted at version 1|0||559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.021-0400 m31100| 2015-07-09T14:17:12.020-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.021-0400 m31100| 2015-07-09T14:17:12.021-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.022-0400 m31100| 2015-07-09T14:17:12.021-0400 W SHARDING [conn15] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.022-0400 m30999| 2015-07-09T14:17:12.021-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.025-0400 m31100| 2015-07-09T14:17:12.025-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
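note: what follows is ordinary auto-split contention, not a test failure. As the 10 threads insert through both mongoses (m30999 and m30998), each mongos independently asks the m31100 primary to split the single { : MinKey } -->> { : MaxKey } chunk, but only one splitChunk request at a time can hold the collection's distributed lock; the rest fail with code 125 ("could not acquire collection lock") and retry with fresh split points. For comparison, a manual split of the same chunk would go through the mongos-facing split command (a sketch; the log shows the shard-internal splitChunk command that auto-split uses):

    // Sketch: manually request the same split point through mongos.
    var mongos = new Mongo("localhost:30999");
    assert.commandWorked(
        mongos.adminCommand({ split: "db71.coll71", middle: { j: 4 } }));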
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.027-0400 m30998| 2015-07-09T14:17:12.026-0400 W SHARDING [conn458] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.028-0400 m31100| 2015-07-09T14:17:12.027-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.028-0400 m31100| 2015-07-09T14:17:12.027-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:12.027-0400-559ebaa8792e00bb67274ac2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465832027), what: "multi-split", ns: "db71.coll71", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 1, of: 3, chunk: { min: { j: MinKey }, max: { j: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559ebaa7ca4787b9985d1eee') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.029-0400 m31100| 2015-07-09T14:17:12.028-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.030-0400 m31100| 2015-07-09T14:17:12.030-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.030-0400 m30998| 2015-07-09T14:17:12.030-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.035-0400 m31100| 2015-07-09T14:17:12.035-0400 I SHARDING [conn15] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.036-0400 m31100| 2015-07-09T14:17:12.035-0400 I SHARDING [conn187] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.037-0400 m31100| 2015-07-09T14:17:12.036-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.037-0400 m31100| 2015-07-09T14:17:12.036-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.039-0400 m31100| 2015-07-09T14:17:12.038-0400 W SHARDING [conn15] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.039-0400 m30999| 2015-07-09T14:17:12.038-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.040-0400 m31100| 2015-07-09T14:17:12.040-0400 W SHARDING [conn187] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.041-0400 m30999| 2015-07-09T14:17:12.040-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.047-0400 m31100| 2015-07-09T14:17:12.046-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.048-0400 m31100| 2015-07-09T14:17:12.047-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.049-0400 m31100| 2015-07-09T14:17:12.048-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.050-0400 m31100| 2015-07-09T14:17:12.049-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.051-0400 m30998| 2015-07-09T14:17:12.050-0400 W SHARDING [conn458] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.051-0400 m31100| 2015-07-09T14:17:12.050-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.051-0400 m31100| 2015-07-09T14:17:12.050-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.051-0400 m31100| 2015-07-09T14:17:12.051-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.053-0400 m31100| 2015-07-09T14:17:12.052-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : 
MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.054-0400 m30998| 2015-07-09T14:17:12.053-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.054-0400 m31100| 2015-07-09T14:17:12.053-0400 W SHARDING [conn35] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.055-0400 m30998| 2015-07-09T14:17:12.053-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.071-0400 m31100| 2015-07-09T14:17:12.070-0400 I SHARDING [conn187] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.074-0400 m31100| 2015-07-09T14:17:12.072-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.076-0400 m31100| 2015-07-09T14:17:12.074-0400 W SHARDING [conn187] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.076-0400 m31100| 2015-07-09T14:17:12.074-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.078-0400 m30999| 2015-07-09T14:17:12.074-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.079-0400 m31100| 2015-07-09T14:17:12.075-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.080-0400 m31100| 2015-07-09T14:17:12.075-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.080-0400 m31100| 2015-07-09T14:17:12.075-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.080-0400 m31100| 2015-07-09T14:17:12.077-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.080-0400 m31100| 2015-07-09T14:17:12.077-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.080-0400 m31100| 2015-07-09T14:17:12.078-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.081-0400 m30998| 2015-07-09T14:17:12.078-0400 W SHARDING [conn458] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.081-0400 m31100| 2015-07-09T14:17:12.078-0400 W SHARDING [conn35] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.082-0400 m30998| 2015-07-09T14:17:12.078-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 10.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.082-0400 m31100| 2015-07-09T14:17:12.079-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.082-0400 m30998| 2015-07-09T14:17:12.080-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.082-0400 m31100| 2015-07-09T14:17:12.080-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:12.080-0400-559ebaa8792e00bb67274ac3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465832080), what: "multi-split", ns: "db71.coll71", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 2, of: 3, chunk: { min: { j: 0.0 }, max: { j: 4.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559ebaa7ca4787b9985d1eee') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.091-0400 m31100| 2015-07-09T14:17:12.090-0400 I SHARDING [conn187] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.092-0400 m31100| 2015-07-09T14:17:12.091-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.093-0400 m31100| 2015-07-09T14:17:12.093-0400 W SHARDING [conn187] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.094-0400 m30999| 2015-07-09T14:17:12.093-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.103-0400 m31100| 2015-07-09T14:17:12.103-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.104-0400 m31100| 2015-07-09T14:17:12.104-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.106-0400 m31100| 2015-07-09T14:17:12.105-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.106-0400 m31100| 2015-07-09T14:17:12.105-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.107-0400 m31100| 2015-07-09T14:17:12.106-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.108-0400 m31100| 2015-07-09T14:17:12.106-0400 W SHARDING [conn35] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.109-0400 m30998| 2015-07-09T14:17:12.106-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.109-0400 m31100| 2015-07-09T14:17:12.107-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.110-0400 m30998| 2015-07-09T14:17:12.107-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 18.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.110-0400 m31100| 2015-07-09T14:17:12.107-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.110-0400 m31100| 2015-07-09T14:17:12.109-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.111-0400 m30998| 2015-07-09T14:17:12.109-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.121-0400 m31100| 2015-07-09T14:17:12.120-0400 I SHARDING [conn187] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.122-0400 m31100| 2015-07-09T14:17:12.121-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.125-0400 m31100| 2015-07-09T14:17:12.124-0400 W SHARDING [conn187] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.125-0400 m30999| 2015-07-09T14:17:12.124-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.129-0400 m31100| 2015-07-09T14:17:12.129-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.131-0400 m31100| 2015-07-09T14:17:12.130-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.132-0400 m31100| 2015-07-09T14:17:12.131-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.134-0400 m30998| 2015-07-09T14:17:12.132-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.136-0400 m31100| 2015-07-09T14:17:12.135-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:12.135-0400-559ebaa8792e00bb67274ac4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465832135), what: "multi-split", ns: "db71.coll71", details: { before: { min: { j: MinKey }, max: { j: MaxKey } }, number: 3, of: 3, chunk: { min: { j: 4.0 }, max: { j: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559ebaa7ca4787b9985d1eee') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.145-0400 m31100| 2015-07-09T14:17:12.144-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.147-0400 m31100| 2015-07-09T14:17:12.146-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.148-0400 m31100| 
2015-07-09T14:17:12.147-0400 I SHARDING [conn187] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.150-0400 m31100| 2015-07-09T14:17:12.148-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.151-0400 m31100| 2015-07-09T14:17:12.149-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.152-0400 m30998| 2015-07-09T14:17:12.149-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.153-0400 m31100| 2015-07-09T14:17:12.150-0400 I SHARDING [conn15] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.153-0400 m31100| 2015-07-09T14:17:12.150-0400 I SHARDING [conn34] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.154-0400 m31100| 2015-07-09T14:17:12.151-0400 W SHARDING [conn187] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
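note: meanwhile the winning request on conn38 has recorded a three-way split in the changelog (the "multi-split" events 1, 2 and 3 of 3 above), turning the original chunk into [{ j: MinKey }, { j: 0.0 }), [{ j: 0.0 }, { j: 4.0 }) and [{ j: 4.0 }, { j: MaxKey }). The resulting layout can be read back from config.chunks (a sketch, assuming the same mongos):

    // Sketch: inspect the chunk layout produced by the multi-split above.
    var configDB = new Mongo("localhost:30999").getDB("config");
    configDB.chunks.find({ ns: "db71.coll71" }).sort({ min: 1 }).forEach(printjson);
    // Expected ranges per the changelog events:
    //   { j: MinKey } -->> { j: 0.0 }
    //   { j: 0.0 }    -->> { j: 4.0 }
    //   { j: 4.0 }    -->> { j: MaxKey }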
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.154-0400 m31100| 2015-07-09T14:17:12.151-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.155-0400 m30999| 2015-07-09T14:17:12.151-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.155-0400 m31100| 2015-07-09T14:17:12.152-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.155-0400 m31100| 2015-07-09T14:17:12.152-0400 W SHARDING [conn34] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.156-0400 m30999| 2015-07-09T14:17:12.153-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.156-0400 m31100| 2015-07-09T14:17:12.154-0400 W SHARDING [conn15] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.156-0400 m30999| 2015-07-09T14:17:12.154-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.169-0400 m31100| 2015-07-09T14:17:12.167-0400 I SHARDING [conn15] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.171-0400 m31100| 2015-07-09T14:17:12.167-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.172-0400 m31100| 2015-07-09T14:17:12.167-0400 I SHARDING [conn34] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.172-0400 m31100| 2015-07-09T14:17:12.167-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.172-0400 m31100| 2015-07-09T14:17:12.168-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.173-0400 m31100| 2015-07-09T14:17:12.168-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.173-0400 m31100| 2015-07-09T14:17:12.169-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.175-0400 m31100| 2015-07-09T14:17:12.169-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.175-0400 m31100| 2015-07-09T14:17:12.170-0400 W SHARDING [conn15] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.176-0400 m30999| 2015-07-09T14:17:12.170-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.176-0400 m31100| 2015-07-09T14:17:12.170-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.177-0400 m30998| 2015-07-09T14:17:12.170-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.177-0400 m31100| 2015-07-09T14:17:12.170-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.178-0400 m31100| 2015-07-09T14:17:12.171-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.178-0400 m31100| 2015-07-09T14:17:12.171-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.179-0400 m30998| 2015-07-09T14:17:12.171-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.179-0400 m31100| 2015-07-09T14:17:12.172-0400 W SHARDING [conn35] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.179-0400 m31100| 2015-07-09T14:17:12.173-0400 W SHARDING [conn34] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.180-0400 m30998| 2015-07-09T14:17:12.173-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.180-0400 m30999| 2015-07-09T14:17:12.173-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.192-0400 m31100| 2015-07-09T14:17:12.191-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.193-0400 m31100| 2015-07-09T14:17:12.192-0400 I SHARDING [conn38] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked. 
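
The burst above is one autosplit race: both mongos routers (m30998, m30999) ask the primary m31100 to split the same chunk [{ : MinKey }, { : MaxKey }), and every request but the winner's fails with code 125 because the winner (conn38) already holds the collection's distributed lock. As a diagnostic sketch only (not part of the test), a shell connected to either mongos of this run could list the holder of that lock; the field names follow the 3.x config.locks schema, where state 2 means the lock is held:

    var configDB = db.getSiblingDB('config');
    // Show who holds the distributed lock that the losing splitChunk
    // attempts are queueing behind; its 'why' field should name the
    // "splitting chunk" operation quoted in the errmsg entries above.
    configDB.locks.find({ _id: 'db71.coll71', state: 2 }).forEach(printjson);
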
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.194-0400 m31100| 2015-07-09T14:17:12.192-0400 I COMMAND [conn38] command db71.coll71 command: splitChunk { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 491 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 2972 } } } protocol:op_command 177ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.194-0400 m31100| 2015-07-09T14:17:12.193-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.197-0400 m30999| 2015-07-09T14:17:12.195-0400 I SHARDING [conn458] ChunkManager: time to load chunks for db71.coll71: 2ms sequenceNumber: 309 version: 1|3||559ebaa7ca4787b9985d1eee based on: 1|0||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.197-0400 m30999| 2015-07-09T14:17:12.195-0400 I SHARDING [conn458] autosplitted db71.coll71 shard: ns: db71.coll71, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { j: MinKey }, max: { j: MaxKey } into 3 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.206-0400 m31100| 2015-07-09T14:17:12.205-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ac5
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.206-0400 m31100| 2015-07-09T14:17:12.205-0400 I SHARDING [conn35] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.212-0400 m31100| 2015-07-09T14:17:12.209-0400 I SHARDING [conn35] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 3ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.214-0400 m31100| 2015-07-09T14:17:12.209-0400 W SHARDING [conn35] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.214-0400 m31100| 2015-07-09T14:17:12.209-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.214-0400 m31100| 2015-07-09T14:17:12.209-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.215-0400 m31100| 2015-07-09T14:17:12.211-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.216-0400 m31100| 2015-07-09T14:17:12.211-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.216-0400 m31100| 2015-07-09T14:17:12.211-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.216-0400 m30998| 2015-07-09T14:17:12.212-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 12.0 }, { j: 14.0 }, { j: 16.0 }, { j: 20.0 }, { j: 24.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.217-0400 m31100| 2015-07-09T14:17:12.214-0400 I SHARDING [conn32] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.217-0400 m31100| 2015-07-09T14:17:12.214-0400 I SHARDING [conn32] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.217-0400 m31100| 2015-07-09T14:17:12.214-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
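
At this point conn38's splitChunk with splitKeys [ { j: 0.0 }, { j: 4.0 } ] has committed: m30999 reloads its chunk manager (version 1|3) and reports the chunk autosplit into 3, and the queued attempts still aimed at [{ : MinKey }, { : MaxKey }) start failing with "cannot find chunk" instead of lock contention. A small sketch (again assuming a shell on one of this run's mongos routers) to confirm the resulting layout straight from the config metadata:

    var configDB = db.getSiblingDB('config');
    // Print each chunk of db71.coll71 in shard-key order, e.g.
    // { "j" : { "$minKey" : 1 } } -->> { "j" : 0 } on test-rs0
    configDB.chunks.find({ ns: 'db71.coll71' }).sort({ min: 1 }).forEach(function(c) {
        print(tojson(c.min, '', true) + ' -->> ' + tojson(c.max, '', true) + ' on ' + c.shard);
    });
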
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.218-0400 m30998| 2015-07-09T14:17:12.215-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.218-0400 m31100| 2015-07-09T14:17:12.215-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ac6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.218-0400 m31100| 2015-07-09T14:17:12.216-0400 I SHARDING [conn39] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.219-0400 m31100| 2015-07-09T14:17:12.219-0400 I SHARDING [conn39] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.219-0400 m31100| 2015-07-09T14:17:12.219-0400 W SHARDING [conn39] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.220-0400 m31100| 2015-07-09T14:17:12.220-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.221-0400 m30998| 2015-07-09T14:17:12.220-0400 W SHARDING [conn458] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.241-0400 m31100| 2015-07-09T14:17:12.238-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.242-0400 m31100| 2015-07-09T14:17:12.239-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.242-0400 m31100| 2015-07-09T14:17:12.240-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.242-0400 m31100| 2015-07-09T14:17:12.240-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.246-0400 m31100| 2015-07-09T14:17:12.243-0400 I SHARDING [conn32] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.247-0400 m31100| 2015-07-09T14:17:12.243-0400 I SHARDING [conn32] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.247-0400 m31100| 2015-07-09T14:17:12.243-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.247-0400 m30998| 2015-07-09T14:17:12.244-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.248-0400 m31100| 2015-07-09T14:17:12.245-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ac8
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.248-0400 m31100| 2015-07-09T14:17:12.245-0400 I SHARDING [conn39] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.249-0400 m31100| 2015-07-09T14:17:12.248-0400 I SHARDING [conn39] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.249-0400 m31100| 2015-07-09T14:17:12.248-0400 W SHARDING [conn39] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.253-0400 m31100| 2015-07-09T14:17:12.253-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked.
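
The same cycle now repeats: each router drains its queued autosplit requests, the shard acquires the lock, refreshes metadata (already at shard version 1|3), and rejects the split because the chunk [{ : MinKey }, { : MaxKey }) no longer exists. The routers recover on their own once they reload the routing table, so the following is purely illustrative, but a mongos stuck on a stale cached table can also be told to drop it explicitly:

    // Run against the stale mongos; the next operation on the namespace
    // reloads chunk metadata from the config servers.
    db.adminCommand({ flushRouterConfig: 1 });
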
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.254-0400 m30998| 2015-07-09T14:17:12.253-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 22.0 }, { j: 24.0 }, { j: 28.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.272-0400 m31100| 2015-07-09T14:17:12.271-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.272-0400 m31100| 2015-07-09T14:17:12.272-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.272-0400 m31100| 2015-07-09T14:17:12.272-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.274-0400 m31100| 2015-07-09T14:17:12.273-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.274-0400 m31100| 2015-07-09T14:17:12.273-0400 I SHARDING [conn36] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.274-0400 m31100| 2015-07-09T14:17:12.273-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.274-0400 m31100| 2015-07-09T14:17:12.274-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.275-0400 m31100| 2015-07-09T14:17:12.274-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.277-0400 m31100| 2015-07-09T14:17:12.276-0400 I SHARDING [conn32] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.278-0400 m31100| 2015-07-09T14:17:12.276-0400 I SHARDING [conn36] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.278-0400 m31100| 2015-07-09T14:17:12.277-0400 I SHARDING [conn32] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.278-0400 m31100| 2015-07-09T14:17:12.277-0400 I SHARDING [conn36] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.279-0400 m31100| 2015-07-09T14:17:12.277-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.279-0400 m31100| 2015-07-09T14:17:12.277-0400 W SHARDING [conn36] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.279-0400 m30998| 2015-07-09T14:17:12.277-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.279-0400 m31100| 2015-07-09T14:17:12.277-0400 I SHARDING [conn39] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.280-0400 m31100| 2015-07-09T14:17:12.277-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.280-0400 m31100| 2015-07-09T14:17:12.277-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.280-0400 m30998| 2015-07-09T14:17:12.277-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.281-0400 m30998| 2015-07-09T14:17:12.278-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.285-0400 m31100| 2015-07-09T14:17:12.285-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274acb
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.286-0400 m31100| 2015-07-09T14:17:12.285-0400 I SHARDING [conn35] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.287-0400 m31100| 2015-07-09T14:17:12.287-0400 I SHARDING [conn35] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.288-0400 m31100| 2015-07-09T14:17:12.287-0400 W SHARDING [conn35] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.290-0400 m31100| 2015-07-09T14:17:12.290-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.292-0400 m30998| 2015-07-09T14:17:12.291-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 24.0 }, { j: 26.0 }, { j: 30.0 }, { j: 34.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.305-0400 m31100| 2015-07-09T14:17:12.303-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.305-0400 m31100| 2015-07-09T14:17:12.303-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.305-0400 m31100| 2015-07-09T14:17:12.304-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.306-0400 m31100| 2015-07-09T14:17:12.305-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.308-0400 m31100| 2015-07-09T14:17:12.308-0400 I SHARDING [conn36] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.310-0400 m31100| 2015-07-09T14:17:12.310-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.312-0400 m31100| 2015-07-09T14:17:12.311-0400 W SHARDING [conn36] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.313-0400 m30998| 2015-07-09T14:17:12.311-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 34.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.313-0400 m31100| 2015-07-09T14:17:12.311-0400 I SHARDING [conn39] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.314-0400 m31100| 2015-07-09T14:17:12.311-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.314-0400 m30998| 2015-07-09T14:17:12.312-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.314-0400 m31100| 2015-07-09T14:17:12.311-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.314-0400 m31100| 2015-07-09T14:17:12.313-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ace
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.315-0400 m31100| 2015-07-09T14:17:12.313-0400 I SHARDING [conn35] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.315-0400 m31100| 2015-07-09T14:17:12.315-0400 I SHARDING [conn35] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.318-0400 m31100| 2015-07-09T14:17:12.315-0400 W SHARDING [conn35] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.324-0400 m31100| 2015-07-09T14:17:12.323-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.325-0400 m30998| 2015-07-09T14:17:12.324-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.332-0400 m31100| 2015-07-09T14:17:12.332-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.336-0400 m31100| 2015-07-09T14:17:12.335-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.338-0400 m31100| 2015-07-09T14:17:12.338-0400 I SHARDING [conn36] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.339-0400 m31100| 2015-07-09T14:17:12.338-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.340-0400 m31100| 2015-07-09T14:17:12.339-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.340-0400 m31100| 2015-07-09T14:17:12.339-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.340-0400 m31100| 2015-07-09T14:17:12.339-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ad0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.340-0400 m31100| 2015-07-09T14:17:12.339-0400 I SHARDING [conn35] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.341-0400 m31100| 2015-07-09T14:17:12.340-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 40.0 }, { j: 50.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.342-0400 m31100| 2015-07-09T14:17:12.341-0400 W SHARDING [conn36] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.343-0400 m31100| 2015-07-09T14:17:12.341-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 40.0 }, { j: 50.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.344-0400 m30998| 2015-07-09T14:17:12.341-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 40.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.345-0400 m31100| 2015-07-09T14:17:12.341-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.346-0400 m30998| 2015-07-09T14:17:12.342-0400 W SHARDING [conn458] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 40.0 }, { j: 50.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.347-0400 m31100| 2015-07-09T14:17:12.342-0400 W SHARDING [conn39] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.349-0400 m30998| 2015-07-09T14:17:12.343-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 40.0 }, { j: 50.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.349-0400 m31100| 2015-07-09T14:17:12.344-0400 I SHARDING [conn35] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.349-0400 m31100| 2015-07-09T14:17:12.344-0400 W SHARDING [conn35] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.350-0400 m31100| 2015-07-09T14:17:12.349-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.350-0400 m30998| 2015-07-09T14:17:12.349-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 36.0 }, { j: 42.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.364-0400 m31100| 2015-07-09T14:17:12.363-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.365-0400 m31100| 2015-07-09T14:17:12.363-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.365-0400 m31100| 2015-07-09T14:17:12.364-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 42.0 }, { j: 48.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.366-0400 m31100| 2015-07-09T14:17:12.365-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 42.0 }, { j: 48.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.368-0400 m31100| 2015-07-09T14:17:12.367-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.368-0400 m31100| 2015-07-09T14:17:12.367-0400 I SHARDING [conn35] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.368-0400 m31100| 2015-07-09T14:17:12.367-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.369-0400 m31100| 2015-07-09T14:17:12.367-0400 W SHARDING [conn35] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.369-0400 m30998| 2015-07-09T14:17:12.368-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 42.0 }, { j: 48.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.370-0400 m31100| 2015-07-09T14:17:12.368-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 42.0 }, { j: 46.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.371-0400 m31100| 2015-07-09T14:17:12.370-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.372-0400 m30998| 2015-07-09T14:17:12.371-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 42.0 }, { j: 46.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.377-0400 m31100| 2015-07-09T14:17:12.376-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ad1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.377-0400 m31100| 2015-07-09T14:17:12.376-0400 I SHARDING [conn39] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.380-0400 m31100| 2015-07-09T14:17:12.379-0400 I SHARDING [conn39] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.380-0400 m31100| 2015-07-09T14:17:12.379-0400 W SHARDING [conn39] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.381-0400 m31100| 2015-07-09T14:17:12.380-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked.
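
The interleaved "(another update won)" lines show the other side of the same race: contenders acquire the distributed lock by updating its document on the config servers, and only the first update wins. A manual split issued through the public split command can lose this race in exactly the same way, so a hypothetical retry wrapper (splitWithRetry is illustration only, not part of the test) might look like:

    // Retry a manual split a few times; lock contention of the kind logged
    // above is transient, so a short backoff is normally enough.
    function splitWithRetry(ns, middle, attempts) {
        var res;
        for (var i = 0; i < attempts; i++) {
            res = db.adminCommand({ split: ns, middle: middle });
            if (res.ok) {
                return res;
            }
            print('split attempt ' + (i + 1) + ' failed: ' + res.errmsg);
            sleep(100); // back off briefly before the next attempt
        }
        return res;
    }
    // e.g. splitWithRetry('db71.coll71', { j: 10 }, 5);
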
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.382-0400 m30998| 2015-07-09T14:17:12.381-0400 W SHARDING [conn458] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 42.0 }, { j: 48.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.397-0400 m31100| 2015-07-09T14:17:12.396-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.399-0400 m31100| 2015-07-09T14:17:12.397-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 44.0 }, { j: 48.0 }, { j: 58.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.399-0400 m31100| 2015-07-09T14:17:12.398-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.399-0400 m31100| 2015-07-09T14:17:12.399-0400 I SHARDING [conn35] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.402-0400 m31100| 2015-07-09T14:17:12.399-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 44.0 }, { j: 48.0 }, { j: 54.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.402-0400 m31100| 2015-07-09T14:17:12.400-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 44.0 }, { j: 48.0 }, { j: 54.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.406-0400 m31100| 2015-07-09T14:17:12.405-0400 I SHARDING [conn36] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.410-0400 m31100| 2015-07-09T14:17:12.406-0400 I SHARDING 
[conn36] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 48.0 }, { j: 52.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.410-0400 m31100| 2015-07-09T14:17:12.407-0400 I SHARDING [conn35] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.410-0400 m31100| 2015-07-09T14:17:12.407-0400 I SHARDING [conn35] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.410-0400 m31100| 2015-07-09T14:17:12.407-0400 W SHARDING [conn35] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.411-0400 m30998| 2015-07-09T14:17:12.408-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 44.0 }, { j: 48.0 }, { j: 54.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.411-0400 m31100| 2015-07-09T14:17:12.408-0400 W SHARDING [conn36] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
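When a requester loses the race ("could not acquire lock ... another update won"), the winner's lock document can be inspected in the config database. A sketch, assuming a shell pointed at a mongos and the 3.x-era config schema:

    // The distributed lock that serializes splitChunk/moveChunk on a
    // collection lives in config.locks (state 2 = locked). The ts values
    // in the log (e.g. 559ebaa8792e00bb67274ad1) identify individual
    // acquisitions of that lock.
    db.getSiblingDB('config').locks.find({_id: 'db71.coll71'}).pretty();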
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.412-0400 m30998| 2015-07-09T14:17:12.408-0400 W SHARDING [conn457] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 48.0 }, { j: 52.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.412-0400 m31100| 2015-07-09T14:17:12.408-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ad3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.413-0400 m31100| 2015-07-09T14:17:12.408-0400 I SHARDING [conn32] could not acquire lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' (another update won) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.413-0400 m31100| 2015-07-09T14:17:12.408-0400 I SHARDING [conn39] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.413-0400 m31100| 2015-07-09T14:17:12.408-0400 I SHARDING [conn32] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' was not acquired. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.413-0400 m31100| 2015-07-09T14:17:12.409-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.414-0400 m30998| 2015-07-09T14:17:12.409-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 44.0 }, { j: 48.0 }, { j: 54.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.414-0400 m31100| 2015-07-09T14:17:12.411-0400 I SHARDING [conn39] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.414-0400 m31100| 2015-07-09T14:17:12.411-0400 W SHARDING [conn39] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.414-0400 m31100| 2015-07-09T14:17:12.412-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.415-0400 m30998| 2015-07-09T14:17:12.412-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 44.0 }, { j: 48.0 }, { j: 58.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.425-0400 m31100| 2015-07-09T14:17:12.425-0400 I SHARDING [conn39] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.427-0400 m31100| 2015-07-09T14:17:12.426-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 46.0 }, { j: 50.0 }, { j: 56.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.428-0400 m31100| 2015-07-09T14:17:12.427-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.429-0400 m31100| 2015-07-09T14:17:12.428-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: 
"test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 46.0 }, { j: 50.0 }, { j: 54.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.430-0400 m31100| 2015-07-09T14:17:12.429-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.431-0400 m30998| 2015-07-09T14:17:12.430-0400 W SHARDING [conn459] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 46.0 }, { j: 50.0 }, { j: 54.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.440-0400 m31100| 2015-07-09T14:17:12.439-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa8792e00bb67274ad6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.442-0400 m31100| 2015-07-09T14:17:12.439-0400 I SHARDING [conn39] remotely refreshing metadata for db71.coll71 based on current shard version 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.442-0400 m31100| 2015-07-09T14:17:12.441-0400 I SHARDING [conn39] metadata of collection db71.coll71 already up to date (shard version : 1|3||559ebaa7ca4787b9985d1eee, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.442-0400 m31100| 2015-07-09T14:17:12.441-0400 W SHARDING [conn39] splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.443-0400 m31100| 2015-07-09T14:17:12.442-0400 I SHARDING [conn32] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.445-0400 m31100| 2015-07-09T14:17:12.444-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 44.0 }, { j: 48.0 }, { j: 52.0 }, { j: 60.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.447-0400 m31100| 2015-07-09T14:17:12.446-0400 W SHARDING [conn32] could not acquire collection lock for db71.coll71 to split 
chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.448-0400 m30998| 2015-07-09T14:17:12.446-0400 W SHARDING [conn458] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 44.0 }, { j: 48.0 }, { j: 52.0 }, { j: 60.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.448-0400 m31100| 2015-07-09T14:17:12.446-0400 I SHARDING [conn36] request split points lookup for chunk db71.coll71 { : MinKey } -->> { : MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.449-0400 m31100| 2015-07-09T14:17:12.448-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 44.0 }, { j: 48.0 }, { j: 52.0 }, { j: 56.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.451-0400 m31100| 2015-07-09T14:17:12.449-0400 I SHARDING [conn39] distributed lock 'db71.coll71/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.452-0400 m30998| 2015-07-09T14:17:12.450-0400 W SHARDING [conn460] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 46.0 }, { j: 50.0 }, { j: 56.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "splitChunk cannot find chunk [{ : MinKey },{ : MaxKey }) to split, the chunk boundaries may be stale", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.455-0400 m31100| 2015-07-09T14:17:12.450-0400 W SHARDING [conn36] could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db71.coll71 is taken. 
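For reference, the same split can be issued by hand; going through mongos sidesteps the stale-boundary warnings above because the router refreshes its view of the chunk boundaries before sending the request. A sketch, assuming a shell connected to a mongos:

    // Manual equivalent of the automatic split attempts above.
    // sh.splitAt() splits the chunk containing the given key at exactly
    // that key.
    sh.splitAt('db71.coll71', {j: 30});
    // equivalently, as a raw command:
    // db.adminCommand({split: 'db71.coll71', middle: {j: 30}});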
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.455-0400 m30998| 2015-07-09T14:17:12.450-0400 W SHARDING [conn461] splitChunk failed - cmd: { splitChunk: "db71.coll71", keyPattern: { j: 1.0 }, min: { j: MinKey }, max: { j: MaxKey }, from: "test-rs0", splitKeys: [ { j: 0.0 }, { j: 4.0 }, { j: 6.0 }, { j: 8.0 }, { j: 10.0 }, { j: 14.0 }, { j: 16.0 }, { j: 18.0 }, { j: 20.0 }, { j: 22.0 }, { j: 26.0 }, { j: 28.0 }, { j: 30.0 }, { j: 32.0 }, { j: 34.0 }, { j: 38.0 }, { j: 40.0 }, { j: 42.0 }, { j: 44.0 }, { j: 48.0 }, { j: 52.0 }, { j: 56.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa7ca4787b9985d1eee') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db71.coll71 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.464-0400 m30998| 2015-07-09T14:17:12.459-0400 I SHARDING [conn460] ChunkManager: time to load chunks for db71.coll71: 0ms sequenceNumber: 88 version: 1|3||559ebaa7ca4787b9985d1eee based on: 1|0||559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.651-0400 m30999| 2015-07-09T14:17:12.651-0400 I NETWORK [conn459] end connection 127.0.0.1:64048 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.658-0400 m30999| 2015-07-09T14:17:12.657-0400 I NETWORK [conn461] end connection 127.0.0.1:64052 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.690-0400 m30998| 2015-07-09T14:17:12.690-0400 I NETWORK [conn458] end connection 127.0.0.1:64049 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.699-0400 m30999| 2015-07-09T14:17:12.698-0400 I NETWORK [conn457] end connection 127.0.0.1:64045 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.717-0400 m30998| 2015-07-09T14:17:12.712-0400 I NETWORK [conn461] end connection 127.0.0.1:64054 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.718-0400 m30999| 2015-07-09T14:17:12.718-0400 I NETWORK [conn460] end connection 127.0.0.1:64050 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.726-0400 m30998| 2015-07-09T14:17:12.726-0400 I NETWORK [conn457] end connection 127.0.0.1:64047 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.739-0400 m30998| 2015-07-09T14:17:12.739-0400 I NETWORK [conn459] end connection 127.0.0.1:64051 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.747-0400 m30999| 2015-07-09T14:17:12.746-0400 I NETWORK [conn458] end connection 127.0.0.1:64046 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.762-0400 m30998| 2015-07-09T14:17:12.762-0400 I NETWORK [conn460] end connection 127.0.0.1:64053 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.762-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.762-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.762-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.762-0400 jstests/concurrency/fsm_workloads/explain_find.js: Workload completed in 988 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.763-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.763-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.763-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.763-0400 m30999| 
2015-07-09T14:17:12.762-0400 I COMMAND [conn1] DROP: db71.coll71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.763-0400 m30999| 2015-07-09T14:17:12.762-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:12.762-0400-559ebaa8ca4787b9985d1ef0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465832762), what: "dropCollection.start", ns: "db71.coll71", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.819-0400 m30999| 2015-07-09T14:17:12.819-0400 I SHARDING [conn1] distributed lock 'db71.coll71/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa8ca4787b9985d1ef1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.820-0400 m31100| 2015-07-09T14:17:12.820-0400 I COMMAND [conn38] CMD: drop db71.coll71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.823-0400 m31200| 2015-07-09T14:17:12.822-0400 I COMMAND [conn64] CMD: drop db71.coll71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.824-0400 m31101| 2015-07-09T14:17:12.824-0400 I COMMAND [repl writer worker 13] CMD: drop db71.coll71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.824-0400 m31102| 2015-07-09T14:17:12.824-0400 I COMMAND [repl writer worker 0] CMD: drop db71.coll71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.877-0400 m31100| 2015-07-09T14:17:12.876-0400 I SHARDING [conn38] remotely refreshing metadata for db71.coll71 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559ebaa7ca4787b9985d1eee, current metadata version is 1|3||559ebaa7ca4787b9985d1eee [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.878-0400 m31100| 2015-07-09T14:17:12.878-0400 W SHARDING [conn38] no chunks found when reloading db71.coll71, previous version was 0|0||559ebaa7ca4787b9985d1eee, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.879-0400 m31100| 2015-07-09T14:17:12.878-0400 I SHARDING [conn38] dropping metadata for db71.coll71 at shard version 1|3||559ebaa7ca4787b9985d1eee, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.881-0400 m30999| 2015-07-09T14:17:12.880-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:12.880-0400-559ebaa8ca4787b9985d1ef2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465832880), what: "dropCollection", ns: "db71.coll71", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.935-0400 m30999| 2015-07-09T14:17:12.935-0400 I SHARDING [conn1] distributed lock 'db71.coll71/bs-osx108-8:30999:1436464534:16807' unlocked. 
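Each workload's teardown is an ordinary drop through mongos: the router takes the collection's distributed lock, logs dropCollection.start and dropCollection metadata events, and every shard (and its secondaries, via the repl writer workers) drops the local data and sharding metadata, exactly as the entries above and below show. The shell-level equivalent, assuming a connection to a mongos:

    // Drop the sharded collection, then its database, via mongos. Both
    // operations are recorded as metadata events on the config server.
    var testDB = db.getSiblingDB('db71');
    testDB.coll71.drop();
    testDB.dropDatabase();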
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.992-0400 m30999| 2015-07-09T14:17:12.991-0400 I COMMAND [conn1] DROP DATABASE: db71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.992-0400 m30999| 2015-07-09T14:17:12.992-0400 I SHARDING [conn1] DBConfig::dropDatabase: db71 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:12.992-0400 m30999| 2015-07-09T14:17:12.992-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:12.992-0400-559ebaa8ca4787b9985d1ef3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465832992), what: "dropDatabase.start", ns: "db71", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.097-0400 m30999| 2015-07-09T14:17:13.097-0400 I SHARDING [conn1] DBConfig::dropDatabase: db71 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.098-0400 m31100| 2015-07-09T14:17:13.097-0400 I COMMAND [conn160] dropDatabase db71 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.098-0400 m31100| 2015-07-09T14:17:13.098-0400 I COMMAND [conn160] dropDatabase db71 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.099-0400 m30999| 2015-07-09T14:17:13.098-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.098-0400-559ebaa9ca4787b9985d1ef4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465833098), what: "dropDatabase", ns: "db71", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.099-0400 m31101| 2015-07-09T14:17:13.099-0400 I COMMAND [repl writer worker 14] dropDatabase db71 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.099-0400 m31101| 2015-07-09T14:17:13.099-0400 I COMMAND [repl writer worker 14] dropDatabase db71 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.100-0400 m31102| 2015-07-09T14:17:13.099-0400 I COMMAND [repl writer worker 6] dropDatabase db71 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.100-0400 m31102| 2015-07-09T14:17:13.099-0400 I COMMAND [repl writer worker 6] dropDatabase db71 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.187-0400 m31100| 2015-07-09T14:17:13.187-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.191-0400 m31101| 2015-07-09T14:17:13.191-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.191-0400 m31102| 2015-07-09T14:17:13.191-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.226-0400 m31200| 2015-07-09T14:17:13.226-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.229-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.230-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.230-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.230-0400 jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.230-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.230-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.230-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.230-0400 m31202| 2015-07-09T14:17:13.229-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:13.230-0400 m31201| 2015-07-09T14:17:13.229-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.238-0400 m30999| 2015-07-09T14:17:13.237-0400 I SHARDING [conn1] distributed lock 'db72/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa9ca4787b9985d1ef5 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.241-0400 m30999| 2015-07-09T14:17:13.241-0400 I SHARDING [conn1] Placing [db72] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.241-0400 m30999| 2015-07-09T14:17:13.241-0400 I SHARDING [conn1] Enabling sharding for database [db72] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.296-0400 m30999| 2015-07-09T14:17:13.295-0400 I SHARDING [conn1] distributed lock 'db72/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.323-0400 m31100| 2015-07-09T14:17:13.321-0400 I INDEX [conn69] build index on: db72.coll72 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.323-0400 m31100| 2015-07-09T14:17:13.322-0400 I INDEX [conn69] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.340-0400 m31100| 2015-07-09T14:17:13.339-0400 I INDEX [conn69] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.341-0400 m30999| 2015-07-09T14:17:13.341-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db72.coll72", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.344-0400 m30999| 2015-07-09T14:17:13.344-0400 I SHARDING [conn1] distributed lock 'db72.coll72/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaa9ca4787b9985d1ef6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.345-0400 m30999| 2015-07-09T14:17:13.345-0400 I SHARDING [conn1] enable sharding on: db72.coll72 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.346-0400 m30999| 2015-07-09T14:17:13.345-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.345-0400-559ebaa9ca4787b9985d1ef7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465833345), what: "shardCollection.start", ns: "db72.coll72", details: { shardKey: { _id: "hashed" }, collection: "db72.coll72", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.348-0400 m31102| 2015-07-09T14:17:13.347-0400 I INDEX [repl writer worker 7] build index on: db72.coll72 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.348-0400 m31102| 2015-07-09T14:17:13.348-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.358-0400 m31101| 2015-07-09T14:17:13.357-0400 I INDEX [repl writer worker 9] build index on: db72.coll72 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.358-0400 m31101| 2015-07-09T14:17:13.357-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.364-0400 m31102| 2015-07-09T14:17:13.364-0400 I INDEX [repl writer worker 
7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.366-0400 m31101| 2015-07-09T14:17:13.366-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.398-0400 m30999| 2015-07-09T14:17:13.398-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db72.coll72 using new epoch 559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.506-0400 m30999| 2015-07-09T14:17:13.505-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db72.coll72: 1ms sequenceNumber: 310 version: 1|1||559ebaa9ca4787b9985d1ef8 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.562-0400 m30999| 2015-07-09T14:17:13.561-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db72.coll72: 0ms sequenceNumber: 311 version: 1|1||559ebaa9ca4787b9985d1ef8 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.564-0400 m31100| 2015-07-09T14:17:13.563-0400 I SHARDING [conn183] remotely refreshing metadata for db72.coll72 with requested shard version 1|1||559ebaa9ca4787b9985d1ef8, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.566-0400 m31100| 2015-07-09T14:17:13.565-0400 I SHARDING [conn183] collection db72.coll72 was previously unsharded, new metadata loaded with shard version 1|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.566-0400 m31100| 2015-07-09T14:17:13.565-0400 I SHARDING [conn183] collection version was loaded at version 1|1||559ebaa9ca4787b9985d1ef8, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.566-0400 m30999| 2015-07-09T14:17:13.566-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.566-0400-559ebaa9ca4787b9985d1ef9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465833566), what: "shardCollection", ns: "db72.coll72", details: { version: "1|1||559ebaa9ca4787b9985d1ef8" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.621-0400 m30999| 2015-07-09T14:17:13.621-0400 I SHARDING [conn1] distributed lock 'db72.coll72/bs-osx108-8:30999:1436464534:16807' unlocked. 
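Each workload gets a fresh database: db72 is placed on test-rs0, and db72.coll72 is sharded on a hashed _id, which is why the harness builds the _id_hashed index on the primary shard (replicated by 31101/31102) and the config server creates two chunks up front (numChunks: 2). The setup reduces to the following, assuming a shell on a mongos:

    // Hashed-sharding setup equivalent to the shardcollection command
    // above. With a hashed key the keyspace is pre-split; the two initial
    // chunks end up one per shard after the migration that follows.
    sh.enableSharding('db72');
    sh.shardCollection('db72.coll72', {_id: 'hashed'});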
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.622-0400 m30999| 2015-07-09T14:17:13.622-0400 I SHARDING [conn1] moving chunk ns: db72.coll72 moving ( ns: db72.coll72, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.623-0400 m31100| 2015-07-09T14:17:13.622-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.624-0400 m31100| 2015-07-09T14:17:13.624-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebaa9ca4787b9985d1ef8') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.628-0400 m31100| 2015-07-09T14:17:13.628-0400 I SHARDING [conn38] distributed lock 'db72.coll72/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa9792e00bb67274ad8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.629-0400 m31100| 2015-07-09T14:17:13.628-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.628-0400-559ebaa9792e00bb67274ad9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465833628), what: "moveChunk.start", ns: "db72.coll72", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.682-0400 m31100| 2015-07-09T14:17:13.682-0400 I SHARDING [conn38] remotely refreshing metadata for db72.coll72 based on current shard version 1|1||559ebaa9ca4787b9985d1ef8, current metadata version is 1|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.684-0400 m31100| 2015-07-09T14:17:13.684-0400 I SHARDING [conn38] metadata of collection db72.coll72 already up to date (shard version : 1|1||559ebaa9ca4787b9985d1ef8, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.684-0400 m31100| 2015-07-09T14:17:13.684-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.684-0400 m31100| 2015-07-09T14:17:13.684-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.685-0400 m31200| 2015-07-09T14:17:13.684-0400 I SHARDING [conn16] remotely refreshing metadata for db72.coll72, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.686-0400 m31200| 2015-07-09T14:17:13.686-0400 I SHARDING [conn16] collection db72.coll72 was previously unsharded, new metadata loaded with shard version 0|0||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.686-0400 m31200| 2015-07-09T14:17:13.686-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebaa9ca4787b9985d1ef8, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.687-0400 m31200| 2015-07-09T14:17:13.686-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db72.coll72 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.690-0400 m31100| 2015-07-09T14:17:13.689-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.693-0400 m31100| 2015-07-09T14:17:13.692-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.698-0400 m31100| 2015-07-09T14:17:13.697-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.700-0400 m31200| 2015-07-09T14:17:13.699-0400 I INDEX [migrateThread] build index on: db72.coll72 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.700-0400 m31200| 2015-07-09T14:17:13.699-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.707-0400 m31100| 2015-07-09T14:17:13.706-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.711-0400 m31200| 2015-07-09T14:17:13.710-0400 I INDEX [migrateThread] build index on: db72.coll72 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.711-0400 m31200| 2015-07-09T14:17:13.710-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.725-0400 m31100| 2015-07-09T14:17:13.724-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.730-0400 m31200| 2015-07-09T14:17:13.729-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.730-0400 m31200| 2015-07-09T14:17:13.730-0400 I SHARDING [migrateThread] Deleter starting delete for: db72.coll72 from { _id: 0 } -> { _id: MaxKey }, with opId: 97872 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.731-0400 m31200| 2015-07-09T14:17:13.730-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db72.coll72 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.740-0400 m31201| 2015-07-09T14:17:13.740-0400 I INDEX [repl writer worker 3] build index on: db72.coll72 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.741-0400 m31201| 2015-07-09T14:17:13.740-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.741-0400 m31202| 2015-07-09T14:17:13.740-0400 I INDEX [repl writer worker 1] build index on: db72.coll72 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.741-0400 m31202| 2015-07-09T14:17:13.740-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.744-0400 m31202| 2015-07-09T14:17:13.744-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.746-0400 m31201| 2015-07-09T14:17:13.746-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.746-0400 m31200| 2015-07-09T14:17:13.746-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.747-0400 m31200| 2015-07-09T14:17:13.746-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db72.coll72' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.759-0400 m31100| 2015-07-09T14:17:13.758-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.759-0400 m31100| 2015-07-09T14:17:13.758-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.760-0400 m31100| 2015-07-09T14:17:13.759-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.760-0400 m31100| 2015-07-09T14:17:13.759-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.769-0400 m31200| 2015-07-09T14:17:13.769-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db72.coll72' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.770-0400 m31200| 2015-07-09T14:17:13.769-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.769-0400-559ebaa9d5a107a5b9c0db73", server: "bs-osx108-8", clientAddr: "", time: 
new Date(1436465833769), what: "moveChunk.to", ns: "db72.coll72", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 43, step 2 of 5: 14, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 22, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.823-0400 m31100| 2015-07-09T14:17:13.822-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.823-0400 m31100| 2015-07-09T14:17:13.823-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebaa9ca4787b9985d1ef8 through { _id: MinKey } -> { _id: 0 } for collection 'db72.coll72' [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.825-0400 m31100| 2015-07-09T14:17:13.824-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.824-0400-559ebaa9792e00bb67274ada", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465833824), what: "moveChunk.commit", ns: "db72.coll72", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.878-0400 m31100| 2015-07-09T14:17:13.878-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.878-0400 m31100| 2015-07-09T14:17:13.878-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.879-0400 m31100| 2015-07-09T14:17:13.878-0400 I SHARDING [conn38] Deleter starting delete for: db72.coll72 from { _id: 0 } -> { _id: MaxKey }, with opId: 235521 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.879-0400 m31100| 2015-07-09T14:17:13.878-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db72.coll72 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.879-0400 m31100| 2015-07-09T14:17:13.878-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.879-0400 m31100| 2015-07-09T14:17:13.879-0400 I SHARDING [conn38] distributed lock 'db72.coll72/bs-osx108-8:31100:1436464536:197041335' unlocked. 
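The moveChunk sequence above walks through the full migration protocol: the donor (31100) takes the distributed lock, the recipient (31200) clones indexes and documents on its migrateThread, replication catches up, the donor enters the critical section, bumps the shard version to 2|0, commits on the config server, and then deletes the moved range inline because the request carried waitForDelete: true. Issued by hand it would look like this, assuming a shell on a mongos:

    // Explicit form of the migration logged by conn38. mongos resolves
    // the target chunk from any key it owns; the waitForDelete in the
    // logged request is why the donor runs the range deleter ("doing
    // delete inline for cleanup of chunk data") before unlocking.
    sh.moveChunk('db72.coll72', {_id: 0}, 'test-rs1');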
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.880-0400 m31100| 2015-07-09T14:17:13.879-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.879-0400-559ebaa9792e00bb67274adb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465833879), what: "moveChunk.from", ns: "db72.coll72", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 60, step 3 of 6: 3, step 4 of 6: 71, step 5 of 6: 119, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.932-0400 m31100| 2015-07-09T14:17:13.931-0400 I COMMAND [conn38] command db72.coll72 command: moveChunk { moveChunk: "db72.coll72", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebaa9ca4787b9985d1ef8') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 309ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.934-0400 m30999| 2015-07-09T14:17:13.933-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db72.coll72: 0ms sequenceNumber: 312 version: 2|1||559ebaa9ca4787b9985d1ef8 based on: 1|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.935-0400 m31100| 2015-07-09T14:17:13.935-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db72.coll72", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa9ca4787b9985d1ef8') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.940-0400 m31100| 2015-07-09T14:17:13.939-0400 I SHARDING [conn38] distributed lock 'db72.coll72/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaa9792e00bb67274adc [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.940-0400 m31100| 2015-07-09T14:17:13.939-0400 I SHARDING [conn38] remotely refreshing metadata for db72.coll72 based on current shard version 2|0||559ebaa9ca4787b9985d1ef8, current metadata version is 2|0||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.941-0400 m31100| 2015-07-09T14:17:13.941-0400 I SHARDING [conn38] updating metadata for db72.coll72 from shard version 2|0||559ebaa9ca4787b9985d1ef8 to shard version 2|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.942-0400 m31100| 2015-07-09T14:17:13.941-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559ebaa9ca4787b9985d1ef8, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.942-0400 m31100| 2015-07-09T14:17:13.941-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.943-0400 m31100| 2015-07-09T14:17:13.942-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:13.942-0400-559ebaa9792e00bb67274add", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new 
Date(1436465833942), what: "split", ns: "db72.coll72", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebaa9ca4787b9985d1ef8') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebaa9ca4787b9985d1ef8') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:13.999-0400 m31100| 2015-07-09T14:17:13.998-0400 I SHARDING [conn38] distributed lock 'db72.coll72/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.001-0400 m30999| 2015-07-09T14:17:14.000-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db72.coll72: 0ms sequenceNumber: 313 version: 2|3||559ebaa9ca4787b9985d1ef8 based on: 2|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.001-0400 m31200| 2015-07-09T14:17:14.001-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db72.coll72", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaa9ca4787b9985d1ef8') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.006-0400 m31200| 2015-07-09T14:17:14.005-0400 I SHARDING [conn64] distributed lock 'db72.coll72/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebaaad5a107a5b9c0db74 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.006-0400 m31200| 2015-07-09T14:17:14.005-0400 I SHARDING [conn64] remotely refreshing metadata for db72.coll72 based on current shard version 0|0||559ebaa9ca4787b9985d1ef8, current metadata version is 1|1||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.008-0400 m31200| 2015-07-09T14:17:14.007-0400 I SHARDING [conn64] updating metadata for db72.coll72 from shard version 0|0||559ebaa9ca4787b9985d1ef8 to shard version 2|0||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.008-0400 m31200| 2015-07-09T14:17:14.007-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559ebaa9ca4787b9985d1ef8, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.008-0400 m31200| 2015-07-09T14:17:14.007-0400 I SHARDING [conn64] splitChunk accepted at version 2|0||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.010-0400 m31200| 2015-07-09T14:17:14.009-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:14.009-0400-559ebaaad5a107a5b9c0db75", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436465834009), what: "split", ns: "db72.coll72", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559ebaa9ca4787b9985d1ef8') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebaa9ca4787b9985d1ef8') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.064-0400 m31200| 2015-07-09T14:17:14.063-0400 I SHARDING [conn64] distributed lock 'db72.coll72/bs-osx108-8:31200:1436464537:809424560' unlocked. 
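After the migration, each shard splits its half of the hashed keyspace at roughly its midpoint: the "split" metadata events show split points at -4611686018427387902 and 4611686018427387902 (about ∓2^62, the quarter points of the signed 64-bit hash range), leaving two chunks per shard at version 2|5. A manual sketch, assuming a shell on a mongos:

    // Quarter-point splits matching the "split" metadata events above.
    // NumberLong keeps the 64-bit boundary values exact in the shell.
    sh.splitAt('db72.coll72', {_id: NumberLong('-4611686018427387902')});
    sh.splitAt('db72.coll72', {_id: NumberLong('4611686018427387902')});

The indexed_insert_1char_1 index that is built and immediately dropped below is presumably the workload's own setup and teardown (this is the noindex variant of indexed_insert_1char, dropping the index its parent workload creates), not part of the sharding machinery.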
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.066-0400 m30999| 2015-07-09T14:17:14.065-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db72.coll72: 0ms sequenceNumber: 314 version: 2|5||559ebaa9ca4787b9985d1ef8 based on: 2|3||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.075-0400 m31200| 2015-07-09T14:17:14.074-0400 I INDEX [conn32] build index on: db72.coll72 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.075-0400 m31200| 2015-07-09T14:17:14.074-0400 I INDEX [conn32] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.076-0400 m31100| 2015-07-09T14:17:14.074-0400 I INDEX [conn183] build index on: db72.coll72 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.076-0400 m31100| 2015-07-09T14:17:14.074-0400 I INDEX [conn183] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.082-0400 m31100| 2015-07-09T14:17:14.081-0400 I INDEX [conn183] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.085-0400 m31200| 2015-07-09T14:17:14.085-0400 I INDEX [conn32] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.088-0400 m31200| 2015-07-09T14:17:14.088-0400 I COMMAND [conn64] CMD: dropIndexes db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.089-0400 m31100| 2015-07-09T14:17:14.088-0400 I COMMAND [conn38] CMD: dropIndexes db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.091-0400 Using 20 threads (requested 20) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.091-0400 m31102| 2015-07-09T14:17:14.091-0400 I INDEX [repl writer worker 4] build index on: db72.coll72 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.091-0400 m31102| 2015-07-09T14:17:14.091-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.106-0400 m31101| 2015-07-09T14:17:14.104-0400 I INDEX [repl writer worker 4] build index on: db72.coll72 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.106-0400 m31101| 2015-07-09T14:17:14.104-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.162-0400 m31202| 2015-07-09T14:17:14.162-0400 I INDEX [repl writer worker 0] build index on: db72.coll72 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.163-0400 m31202| 2015-07-09T14:17:14.162-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.203-0400 m31102| 2015-07-09T14:17:14.203-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.213-0400 m31201| 2015-07-09T14:17:14.209-0400 I INDEX [repl writer worker 9] build index on: db72.coll72 properties: { v: 1, key: { indexed_insert_1char: 1.0 }, name: "indexed_insert_1char_1", ns: "db72.coll72" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.213-0400 m31201| 2015-07-09T14:17:14.209-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.213-0400 m30999| 2015-07-09T14:17:14.210-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64055 #462 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.253-0400 m31102| 2015-07-09T14:17:14.247-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.254-0400 m31101| 2015-07-09T14:17:14.251-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.258-0400 m30999| 2015-07-09T14:17:14.257-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64056 #463 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.262-0400 m31202| 2015-07-09T14:17:14.262-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.268-0400 m30999| 2015-07-09T14:17:14.267-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64057 #464 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.277-0400 m30999| 2015-07-09T14:17:14.276-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64058 #465 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.277-0400 m30999| 2015-07-09T14:17:14.277-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64059 #466 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.283-0400 m31201| 2015-07-09T14:17:14.282-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.293-0400 m30999| 2015-07-09T14:17:14.289-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64060 #467 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.293-0400 m30998| 2015-07-09T14:17:14.293-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64061 #462 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.299-0400 m30999| 2015-07-09T14:17:14.299-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64064 #468 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.304-0400 m30998| 2015-07-09T14:17:14.303-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64062 #463 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.314-0400 m30998| 2015-07-09T14:17:14.314-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64063 #464 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.314-0400 m31201| 2015-07-09T14:17:14.314-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.338-0400 m30999| 2015-07-09T14:17:14.338-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64065 #469 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.338-0400 m30998| 2015-07-09T14:17:14.338-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64066 #465 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.340-0400 m30998| 2015-07-09T14:17:14.340-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64067 #466 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.341-0400 m30998| 2015-07-09T14:17:14.341-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64068 #467 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.346-0400 m30998| 2015-07-09T14:17:14.345-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64069 #468 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.346-0400 m30998| 2015-07-09T14:17:14.345-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64070 #469 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.357-0400 m30998| 2015-07-09T14:17:14.357-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64071 #470 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.363-0400 m30998| 2015-07-09T14:17:14.363-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64072 #471 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.363-0400 m30999| 2015-07-09T14:17:14.363-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64073 #470 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.366-0400 m30999| 2015-07-09T14:17:14.366-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64074 #471 (11 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.377-0400 setting random seed: 2550017354078 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.377-0400 setting random seed: 7383094760589 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.378-0400 setting random seed: 1859214305877 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.378-0400 setting random seed: 
9476099424064 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.379-0400 setting random seed: 4241718323901 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.382-0400 setting random seed: 5862554814666 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.384-0400 setting random seed: 5179953356273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.384-0400 setting random seed: 4803094407543 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.387-0400 setting random seed: 9373157350346 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.388-0400 m31202| 2015-07-09T14:17:14.387-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.391-0400 setting random seed: 7673699003644 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.391-0400 setting random seed: 8141689952462 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.396-0400 m30998| 2015-07-09T14:17:14.396-0400 I SHARDING [conn467] ChunkManager: time to load chunks for db72.coll72: 0ms sequenceNumber: 89 version: 2|5||559ebaa9ca4787b9985d1ef8 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.404-0400 m31101| 2015-07-09T14:17:14.404-0400 I COMMAND [repl writer worker 8] CMD: dropIndexes db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.412-0400 setting random seed: 4094435647130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.412-0400 setting random seed: 289827389642 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.420-0400 setting random seed: 1958815981633 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.420-0400 setting random seed: 7688216962851 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.425-0400 setting random seed: 4655660628341 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.430-0400 setting random seed: 7814853433519 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.438-0400 setting random seed: 1128939990885 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.440-0400 setting random seed: 5607745172455 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.441-0400 setting random seed: 559903006069 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.788-0400 m30999| 2015-07-09T14:17:14.788-0400 I NETWORK [conn463] end connection 127.0.0.1:64056 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.850-0400 m30998| 2015-07-09T14:17:14.849-0400 I NETWORK [conn463] end connection 127.0.0.1:64062 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.855-0400 m30999| 2015-07-09T14:17:14.855-0400 I NETWORK [conn462] end connection 127.0.0.1:64055 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.881-0400 m30998| 2015-07-09T14:17:14.880-0400 I NETWORK [conn462] end connection 127.0.0.1:64061 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.881-0400 m30999| 2015-07-09T14:17:14.881-0400 I NETWORK [conn467] end connection 127.0.0.1:64060 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.929-0400 m30999| 2015-07-09T14:17:14.928-0400 I NETWORK [conn465] end connection 127.0.0.1:64058 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.943-0400 m30998| 2015-07-09T14:17:14.943-0400 I NETWORK [conn464] end connection 127.0.0.1:64063 (8 connections now open) [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:14.955-0400 m30999| 2015-07-09T14:17:14.955-0400 I NETWORK [conn466] end connection 127.0.0.1:64059 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:14.995-0400 m30998| 2015-07-09T14:17:14.995-0400 I NETWORK [conn467] end connection 127.0.0.1:64068 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.004-0400 m30999| 2015-07-09T14:17:15.004-0400 I NETWORK [conn468] end connection 127.0.0.1:64064 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.008-0400 m30999| 2015-07-09T14:17:15.008-0400 I NETWORK [conn464] end connection 127.0.0.1:64057 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.024-0400 m30998| 2015-07-09T14:17:15.023-0400 I NETWORK [conn470] end connection 127.0.0.1:64071 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.038-0400 m30998| 2015-07-09T14:17:15.037-0400 I NETWORK [conn468] end connection 127.0.0.1:64069 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.053-0400 m30999| 2015-07-09T14:17:15.053-0400 I NETWORK [conn469] end connection 127.0.0.1:64065 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.060-0400 m30998| 2015-07-09T14:17:15.060-0400 I NETWORK [conn466] end connection 127.0.0.1:64067 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.079-0400 m30998| 2015-07-09T14:17:15.075-0400 I NETWORK [conn465] end connection 127.0.0.1:64066 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.102-0400 m30998| 2015-07-09T14:17:15.101-0400 I NETWORK [conn469] end connection 127.0.0.1:64070 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.132-0400 m30998| 2015-07-09T14:17:15.131-0400 I NETWORK [conn471] end connection 127.0.0.1:64072 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.135-0400 m30999| 2015-07-09T14:17:15.134-0400 I NETWORK [conn470] end connection 127.0.0.1:64073 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.145-0400 m30999| 2015-07-09T14:17:15.144-0400 I NETWORK [conn471] end connection 127.0.0.1:64074 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.166-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.167-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.167-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.167-0400 jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js: Workload completed in 1076 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.167-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.167-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.167-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.168-0400 m30999| 2015-07-09T14:17:15.167-0400 I COMMAND [conn1] DROP: db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.168-0400 m30999| 2015-07-09T14:17:15.167-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:15.167-0400-559ebaabca4787b9985d1efa", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465835167), what: "dropCollection.start", ns: "db72.coll72", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.226-0400 m30999| 2015-07-09T14:17:15.225-0400 I SHARDING [conn1] distributed lock 
'db72.coll72/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaabca4787b9985d1efb [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.227-0400 m31100| 2015-07-09T14:17:15.226-0400 I COMMAND [conn38] CMD: drop db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.229-0400 m31200| 2015-07-09T14:17:15.229-0400 I COMMAND [conn64] CMD: drop db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.231-0400 m31101| 2015-07-09T14:17:15.230-0400 I COMMAND [repl writer worker 8] CMD: drop db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.231-0400 m31102| 2015-07-09T14:17:15.231-0400 I COMMAND [repl writer worker 5] CMD: drop db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.233-0400 m31202| 2015-07-09T14:17:15.232-0400 I COMMAND [repl writer worker 0] CMD: drop db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.233-0400 m31201| 2015-07-09T14:17:15.233-0400 I COMMAND [repl writer worker 4] CMD: drop db72.coll72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.285-0400 m31100| 2015-07-09T14:17:15.284-0400 I SHARDING [conn38] remotely refreshing metadata for db72.coll72 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebaa9ca4787b9985d1ef8, current metadata version is 2|3||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.287-0400 m31100| 2015-07-09T14:17:15.286-0400 W SHARDING [conn38] no chunks found when reloading db72.coll72, previous version was 0|0||559ebaa9ca4787b9985d1ef8, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.287-0400 m31100| 2015-07-09T14:17:15.286-0400 I SHARDING [conn38] dropping metadata for db72.coll72 at shard version 2|3||559ebaa9ca4787b9985d1ef8, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.288-0400 m31200| 2015-07-09T14:17:15.288-0400 I SHARDING [conn64] remotely refreshing metadata for db72.coll72 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebaa9ca4787b9985d1ef8, current metadata version is 2|5||559ebaa9ca4787b9985d1ef8 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.290-0400 m31200| 2015-07-09T14:17:15.290-0400 W SHARDING [conn64] no chunks found when reloading db72.coll72, previous version was 0|0||559ebaa9ca4787b9985d1ef8, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.290-0400 m31200| 2015-07-09T14:17:15.290-0400 I SHARDING [conn64] dropping metadata for db72.coll72 at shard version 2|5||559ebaa9ca4787b9985d1ef8, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.291-0400 m30999| 2015-07-09T14:17:15.291-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:15.291-0400-559ebaabca4787b9985d1efc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465835291), what: "dropCollection", ns: "db72.coll72", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.346-0400 m30999| 2015-07-09T14:17:15.345-0400 I SHARDING [conn1] distributed lock 'db72.coll72/bs-osx108-8:30999:1436464534:16807' unlocked. 
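
The dropCollection sequence just logged (distributed lock acquired, "dropCollection.start" metadata event, per-shard drop replicated by the repl writer workers, shard metadata dropped, "dropCollection" event, lock released) is all fan-out from a single client-side drop. A minimal shell sketch of that call, assuming the mongos at localhost:30999 from this cluster is still reachable:

// Connect through a mongos router (port taken from the log; the
// localhost address is an assumption for illustration).
var db72 = new Mongo("localhost:30999").getDB("db72");

// One drop() expands into the logged protocol: mongos takes the
// distributed lock for db72.coll72, logs dropCollection.start, sends
// "drop" to each shard primary (m31100, m31200), waits for the shards
// to drop their sharding metadata, logs dropCollection, and unlocks.
db72.coll72.drop();
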
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.403-0400 m30999| 2015-07-09T14:17:15.403-0400 I COMMAND [conn1] DROP DATABASE: db72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.404-0400 m30999| 2015-07-09T14:17:15.403-0400 I SHARDING [conn1] DBConfig::dropDatabase: db72 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.404-0400 m30999| 2015-07-09T14:17:15.403-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:15.403-0400-559ebaabca4787b9985d1efd", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465835403), what: "dropDatabase.start", ns: "db72", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.510-0400 m30999| 2015-07-09T14:17:15.509-0400 I SHARDING [conn1] DBConfig::dropDatabase: db72 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.510-0400 m31100| 2015-07-09T14:17:15.510-0400 I COMMAND [conn160] dropDatabase db72 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.510-0400 m31100| 2015-07-09T14:17:15.510-0400 I COMMAND [conn160] dropDatabase db72 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.511-0400 m30999| 2015-07-09T14:17:15.511-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:15.511-0400-559ebaabca4787b9985d1efe", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465835511), what: "dropDatabase", ns: "db72", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.512-0400 m31102| 2015-07-09T14:17:15.511-0400 I COMMAND [repl writer worker 7] dropDatabase db72 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.512-0400 m31102| 2015-07-09T14:17:15.511-0400 I COMMAND [repl writer worker 7] dropDatabase db72 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.512-0400 m31101| 2015-07-09T14:17:15.512-0400 I COMMAND [repl writer worker 2] dropDatabase db72 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.512-0400 m31101| 2015-07-09T14:17:15.512-0400 I COMMAND [repl writer worker 2] dropDatabase db72 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.600-0400 m31100| 2015-07-09T14:17:15.600-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.604-0400 m31102| 2015-07-09T14:17:15.604-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.604-0400 m31101| 2015-07-09T14:17:15.604-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.637-0400 m31200| 2015-07-09T14:17:15.636-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.639-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.639-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.640-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.640-0400 jstests/concurrency/fsm_workloads/map_reduce_inline.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.640-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.640-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.640-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.641-0400 m31202| 2015-07-09T14:17:15.640-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:15.641-0400 m31201| 2015-07-09T14:17:15.641-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.648-0400 m30999| 2015-07-09T14:17:15.648-0400 I SHARDING [conn1] distributed lock 'db73/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaabca4787b9985d1eff [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.652-0400 m30999| 2015-07-09T14:17:15.652-0400 I SHARDING [conn1] Placing [db73] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.653-0400 m30999| 2015-07-09T14:17:15.652-0400 I SHARDING [conn1] Enabling sharding for database [db73] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.707-0400 m30999| 2015-07-09T14:17:15.707-0400 I SHARDING [conn1] distributed lock 'db73/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.735-0400 m31100| 2015-07-09T14:17:15.734-0400 I INDEX [conn144] build index on: db73.coll73 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db73.coll73" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.735-0400 m31100| 2015-07-09T14:17:15.735-0400 I INDEX [conn144] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.743-0400 m31100| 2015-07-09T14:17:15.741-0400 I INDEX [conn144] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.743-0400 m30999| 2015-07-09T14:17:15.742-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db73.coll73", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.747-0400 m30999| 2015-07-09T14:17:15.747-0400 I SHARDING [conn1] distributed lock 'db73.coll73/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebaabca4787b9985d1f00 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.749-0400 m30999| 2015-07-09T14:17:15.748-0400 I SHARDING [conn1] enable sharding on: db73.coll73 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.749-0400 m30999| 2015-07-09T14:17:15.748-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:15.748-0400-559ebaabca4787b9985d1f01", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465835748), what: "shardCollection.start", ns: "db73.coll73", details: { shardKey: { _id: "hashed" }, collection: "db73.coll73", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.762-0400 m31101| 2015-07-09T14:17:15.762-0400 I INDEX [repl writer worker 7] build index on: db73.coll73 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db73.coll73" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.763-0400 m31102| 2015-07-09T14:17:15.762-0400 I INDEX [repl writer worker 9] build index on: db73.coll73 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db73.coll73" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.763-0400 m31101| 2015-07-09T14:17:15.762-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.763-0400 m31102| 2015-07-09T14:17:15.762-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.767-0400 m31101| 2015-07-09T14:17:15.767-0400 I INDEX [repl writer 
worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.768-0400 m31102| 2015-07-09T14:17:15.768-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.802-0400 m30999| 2015-07-09T14:17:15.802-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db73.coll73 using new epoch 559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.910-0400 m30999| 2015-07-09T14:17:15.910-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db73.coll73: 0ms sequenceNumber: 315 version: 1|1||559ebaabca4787b9985d1f02 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.965-0400 m30999| 2015-07-09T14:17:15.965-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db73.coll73: 0ms sequenceNumber: 316 version: 1|1||559ebaabca4787b9985d1f02 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.967-0400 m31100| 2015-07-09T14:17:15.967-0400 I SHARDING [conn175] remotely refreshing metadata for db73.coll73 with requested shard version 1|1||559ebaabca4787b9985d1f02, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.969-0400 m31100| 2015-07-09T14:17:15.969-0400 I SHARDING [conn175] collection db73.coll73 was previously unsharded, new metadata loaded with shard version 1|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.970-0400 m31100| 2015-07-09T14:17:15.969-0400 I SHARDING [conn175] collection version was loaded at version 1|1||559ebaabca4787b9985d1f02, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:15.970-0400 m30999| 2015-07-09T14:17:15.969-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:15.969-0400-559ebaabca4787b9985d1f03", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465835969), what: "shardCollection", ns: "db73.coll73", details: { version: "1|1||559ebaabca4787b9985d1f02" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.031-0400 m30999| 2015-07-09T14:17:16.030-0400 I SHARDING [conn1] distributed lock 'db73.coll73/bs-osx108-8:30999:1436464534:16807' unlocked. 
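
Everything from "Placing [db73] on: test-rs0" through the shardCollection metadata event above comes from the harness's per-workload setup: enable sharding on the database, then shard the collection on a hashed _id (the harness requests 2 initial chunks, per the numChunks: 2 field in the shardCollection.start event). A hedged sketch of the equivalent shell commands, with names taken from the log:

// Run against a mongos; enableSharding picks a primary shard for the
// database ("Placing [db73] on: test-rs0" in the log).
var admin = new Mongo("localhost:30999").getDB("admin");
admin.runCommand({ enableSharding: "db73" });

// Sharding on { _id: "hashed" } builds the _id_hashed index on the
// primary shard (and, via replication, on its secondaries m31101 and
// m31102) and creates the initial chunks under a new epoch.
admin.runCommand({ shardCollection: "db73.coll73", key: { _id: "hashed" } });
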
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.032-0400 m30999| 2015-07-09T14:17:16.032-0400 I SHARDING [conn1] moving chunk ns: db73.coll73 moving ( ns: db73.coll73, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.032-0400 m31100| 2015-07-09T14:17:16.032-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.034-0400 m31100| 2015-07-09T14:17:16.033-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebaabca4787b9985d1f02') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.038-0400 m31100| 2015-07-09T14:17:16.038-0400 I SHARDING [conn38] distributed lock 'db73.coll73/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaac792e00bb67274adf [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.038-0400 m31100| 2015-07-09T14:17:16.038-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:16.038-0400-559ebaac792e00bb67274ae0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465836038), what: "moveChunk.start", ns: "db73.coll73", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.092-0400 m31100| 2015-07-09T14:17:16.091-0400 I SHARDING [conn38] remotely refreshing metadata for db73.coll73 based on current shard version 1|1||559ebaabca4787b9985d1f02, current metadata version is 1|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.094-0400 m31100| 2015-07-09T14:17:16.093-0400 I SHARDING [conn38] metadata of collection db73.coll73 already up to date (shard version : 1|1||559ebaabca4787b9985d1f02, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.094-0400 m31100| 2015-07-09T14:17:16.093-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.094-0400 m31100| 2015-07-09T14:17:16.094-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.094-0400 m31200| 2015-07-09T14:17:16.094-0400 I SHARDING [conn16] remotely refreshing metadata for db73.coll73, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.096-0400 m31200| 2015-07-09T14:17:16.095-0400 I SHARDING [conn16] collection db73.coll73 was previously unsharded, new metadata loaded with shard version 0|0||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.096-0400 m31200| 2015-07-09T14:17:16.095-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebaabca4787b9985d1f02, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.097-0400 m31200| 2015-07-09T14:17:16.096-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db73.coll73 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.099-0400 m31100| 2015-07-09T14:17:16.098-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.102-0400 m31100| 2015-07-09T14:17:16.101-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.107-0400 m31100| 2015-07-09T14:17:16.106-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.109-0400 m31200| 2015-07-09T14:17:16.109-0400 I INDEX [migrateThread] build index on: db73.coll73 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db73.coll73" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.110-0400 m31200| 2015-07-09T14:17:16.109-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.117-0400 m31100| 2015-07-09T14:17:16.116-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.118-0400 m31200| 2015-07-09T14:17:16.118-0400 I INDEX [migrateThread] build index on: db73.coll73 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db73.coll73" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.119-0400 m31200| 2015-07-09T14:17:16.118-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.129-0400 m31200| 2015-07-09T14:17:16.129-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.130-0400 m31200| 2015-07-09T14:17:16.130-0400 I SHARDING [migrateThread] Deleter starting delete for: db73.coll73 from { _id: 0 } -> { _id: MaxKey }, with opId: 99485 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.130-0400 m31200| 2015-07-09T14:17:16.130-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db73.coll73 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.135-0400 m31100| 2015-07-09T14:17:16.134-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.138-0400 m31202| 2015-07-09T14:17:16.137-0400 I INDEX [repl writer worker 4] build index on: db73.coll73 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db73.coll73" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.138-0400 m31202| 2015-07-09T14:17:16.138-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.142-0400 m31201| 2015-07-09T14:17:16.141-0400 I INDEX [repl writer worker 8] build index on: db73.coll73 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db73.coll73" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.142-0400 m31201| 2015-07-09T14:17:16.142-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.148-0400 m31202| 2015-07-09T14:17:16.147-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.150-0400 m31200| 2015-07-09T14:17:16.150-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.150-0400 m31200| 2015-07-09T14:17:16.150-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db73.coll73' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.151-0400 m31201| 2015-07-09T14:17:16.150-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.168-0400 m31100| 2015-07-09T14:17:16.168-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.169-0400 m31100| 2015-07-09T14:17:16.168-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.169-0400 m31100| 2015-07-09T14:17:16.169-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.169-0400 m31100| 2015-07-09T14:17:16.169-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.174-0400 m31200| 2015-07-09T14:17:16.173-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db73.coll73' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.174-0400 m31200| 2015-07-09T14:17:16.173-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:16.173-0400-559ebaacd5a107a5b9c0db76", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465836173), what: "moveChunk.to", ns: "db73.coll73", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 33, step 2 of 5: 18, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.228-0400 m31100| 2015-07-09T14:17:16.227-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.228-0400 m31100| 2015-07-09T14:17:16.227-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebaabca4787b9985d1f02 through { _id: MinKey } -> { _id: 0 } for collection 'db73.coll73' [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.229-0400 m31100| 2015-07-09T14:17:16.228-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:16.228-0400-559ebaac792e00bb67274ae1", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465836228), what: "moveChunk.commit", ns: "db73.coll73", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.281-0400 m31100| 2015-07-09T14:17:16.281-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.282-0400 m31100| 2015-07-09T14:17:16.281-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.282-0400 m31100| 2015-07-09T14:17:16.281-0400 I SHARDING [conn38] Deleter starting delete for: db73.coll73 from { _id: 0 } -> { _id: MaxKey }, with opId: 237107 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:16.282-0400 m31100| 2015-07-09T14:17:16.281-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db73.coll73 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.282-0400 m31100| 2015-07-09T14:17:16.281-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.283-0400 m31100| 2015-07-09T14:17:16.283-0400 I SHARDING [conn38] distributed lock 'db73.coll73/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.284-0400 m31100| 2015-07-09T14:17:16.283-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:16.283-0400-559ebaac792e00bb67274ae2", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465836283), what: "moveChunk.from", ns: "db73.coll73", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 2, step 4 of 6: 72, step 5 of 6: 113, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.338-0400 m31100| 2015-07-09T14:17:16.336-0400 I COMMAND [conn38] command db73.coll73 command: moveChunk { moveChunk: "db73.coll73", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebaabca4787b9985d1f02') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 304ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.339-0400 m30999| 2015-07-09T14:17:16.338-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db73.coll73: 0ms sequenceNumber: 317 version: 2|1||559ebaabca4787b9985d1f02 based on: 1|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.340-0400 m31100| 2015-07-09T14:17:16.340-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db73.coll73", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaabca4787b9985d1f02') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.344-0400 m31100| 2015-07-09T14:17:16.344-0400 I SHARDING [conn38] distributed lock 'db73.coll73/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaac792e00bb67274ae3 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.345-0400 m31100| 2015-07-09T14:17:16.344-0400 I SHARDING [conn38] remotely refreshing metadata for db73.coll73 based on current shard version 2|0||559ebaabca4787b9985d1f02, current metadata version is 2|0||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.346-0400 m31100| 2015-07-09T14:17:16.345-0400 I SHARDING [conn38] updating metadata for db73.coll73 from shard version 2|0||559ebaabca4787b9985d1f02 to shard version 2|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.346-0400 m31100| 2015-07-09T14:17:16.345-0400 I 
SHARDING [conn38] collection version was loaded at version 2|1||559ebaabca4787b9985d1f02, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.346-0400 m31100| 2015-07-09T14:17:16.345-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.347-0400 m31100| 2015-07-09T14:17:16.347-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:16.347-0400-559ebaac792e00bb67274ae4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465836347), what: "split", ns: "db73.coll73", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebaabca4787b9985d1f02') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebaabca4787b9985d1f02') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.402-0400 m31100| 2015-07-09T14:17:16.402-0400 I SHARDING [conn38] distributed lock 'db73.coll73/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.405-0400 m30999| 2015-07-09T14:17:16.404-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db73.coll73: 0ms sequenceNumber: 318 version: 2|3||559ebaabca4787b9985d1f02 based on: 2|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.405-0400 m31200| 2015-07-09T14:17:16.405-0400 I SHARDING [conn64] received splitChunk request: { splitChunk: "db73.coll73", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebaabca4787b9985d1f02') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.409-0400 m31200| 2015-07-09T14:17:16.409-0400 I SHARDING [conn64] distributed lock 'db73.coll73/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebaacd5a107a5b9c0db77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.409-0400 m31200| 2015-07-09T14:17:16.409-0400 I SHARDING [conn64] remotely refreshing metadata for db73.coll73 based on current shard version 0|0||559ebaabca4787b9985d1f02, current metadata version is 1|1||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.410-0400 m31200| 2015-07-09T14:17:16.410-0400 I SHARDING [conn64] updating metadata for db73.coll73 from shard version 0|0||559ebaabca4787b9985d1f02 to shard version 2|0||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.411-0400 m31200| 2015-07-09T14:17:16.410-0400 I SHARDING [conn64] collection version was loaded at version 2|3||559ebaabca4787b9985d1f02, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.411-0400 m31200| 2015-07-09T14:17:16.410-0400 I SHARDING [conn64] splitChunk accepted at version 2|0||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.412-0400 m31200| 2015-07-09T14:17:16.411-0400 I SHARDING [conn64] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:16.411-0400-559ebaacd5a107a5b9c0db78", server: "bs-osx108-8", clientAddr: "127.0.0.1:62863", time: new Date(1436465836411), what: "split", ns: "db73.coll73", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559ebaabca4787b9985d1f02') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebaabca4787b9985d1f02') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.466-0400 m31200| 2015-07-09T14:17:16.465-0400 I SHARDING [conn64] distributed lock 'db73.coll73/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.468-0400 m30999| 2015-07-09T14:17:16.467-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db73.coll73: 0ms sequenceNumber: 319 version: 2|5||559ebaabca4787b9985d1f02 based on: 2|3||559ebaabca4787b9985d1f02 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.729-0400 m31100| 2015-07-09T14:17:16.729-0400 I COMMAND [conn144] command db73.$cmd command: insert { insert: "coll73", documents: 471, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559ebaabca4787b9985d1f02') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 480, w: 480 } }, Database: { acquireCount: { w: 480 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 471 } }, oplog: { acquireCount: { w: 471 } } } protocol:op_command 176ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.740-0400 m31200| 2015-07-09T14:17:16.739-0400 I COMMAND [conn71] command db73.$cmd command: insert { insert: "coll73", documents: 529, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559ebaabca4787b9985d1f02') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 538, w: 538 } }, Database: { acquireCount: { w: 538 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 529 } }, oplog: { acquireCount: { w: 529 } } } protocol:op_command 186ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.933-0400 m31100| 2015-07-09T14:17:16.933-0400 I COMMAND [conn144] command db73.$cmd command: insert { insert: "coll73", documents: 500, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559ebaabca4787b9985d1f02') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 509, w: 509 } }, Database: { acquireCount: { w: 509 } }, Collection: { acquireCount: { w: 9 } }, Metadata: { acquireCount: { w: 500 } }, oplog: { acquireCount: { w: 500 } } } protocol:op_command 181ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.947-0400 m31200| 2015-07-09T14:17:16.947-0400 I COMMAND [conn71] command db73.$cmd command: insert { insert: "coll73", documents: 500, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559ebaabca4787b9985d1f02') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 510, w: 510 } }, Database: { acquireCount: { w: 510 } }, Collection: { acquireCount: { w: 10 } }, Metadata: { acquireCount: { w: 500 } }, oplog: { acquireCount: { w: 500 } } } protocol:op_command 195ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:16.949-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.015-0400 m30998| 2015-07-09T14:17:17.014-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64076 #472 (2 connections now open) 
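
The chunk choreography above (a moveChunk of { _id: 0 } -> { _id: MaxKey } to test-rs1, then one split per shard at the midpoints of the hashed key space) leaves each shard holding two chunks before the bulk inserts are routed: 471 + 500 documents to test-rs0 and 529 + 500 to test-rs1. A sketch of the same operations issued by hand through mongos, using the shard names and split keys from the log:

var admin = new Mongo("localhost:30999").getDB("admin");

// Migrate the upper chunk; this drives the moveChunk.start /
// moveChunk.commit / moveChunk.from events on m31100 and the
// moveChunk.to event on m31200. bounds identifies the chunk directly
// (useful for hashed keys), and _waitForDelete matches the
// waitForDelete: true field in the logged request.
admin.runCommand({
    moveChunk: "db73.coll73",
    bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ],
    to: "test-rs1",
    _waitForDelete: true
});

// Split each shard's chunk at the midpoint of its half of the hashed
// range, matching the logged splitKeys.
admin.runCommand({ split: "db73.coll73",
                   middle: { _id: NumberLong("-4611686018427387902") } });
admin.runCommand({ split: "db73.coll73",
                   middle: { _id: NumberLong("4611686018427387902") } });
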
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.015-0400 m30999| 2015-07-09T14:17:17.015-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64077 #472 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.020-0400 m30999| 2015-07-09T14:17:17.020-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64078 #473 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.022-0400 m30998| 2015-07-09T14:17:17.022-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64079 #473 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.039-0400 m30999| 2015-07-09T14:17:17.039-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64080 #474 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.046-0400 setting random seed: 1566979261115 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.047-0400 setting random seed: 6376008028164 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.047-0400 setting random seed: 2615412743762 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.047-0400 setting random seed: 6615256462246 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.047-0400 setting random seed: 7242004876025 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.049-0400 m30998| 2015-07-09T14:17:17.048-0400 I SHARDING [conn473] ChunkManager: time to load chunks for db73.coll73: 0ms sequenceNumber: 90 version: 2|5||559ebaabca4787b9985d1f02 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.093-0400 m31100| 2015-07-09T14:17:17.093-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_448 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.094-0400 m31200| 2015-07-09T14:17:17.093-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.121-0400 m31200| 2015-07-09T14:17:17.121-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_272 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.123-0400 m31200| 2015-07-09T14:17:17.123-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.124-0400 m31200| 2015-07-09T14:17:17.123-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_270 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.124-0400 m31100| 2015-07-09T14:17:17.124-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_449 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.125-0400 m31100| 2015-07-09T14:17:17.125-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_446 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.125-0400 m31100| 2015-07-09T14:17:17.125-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_450 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.126-0400 m31100| 2015-07-09T14:17:17.125-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_447 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.141-0400 m31200| 2015-07-09T14:17:17.140-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.882-0400 m31200| 2015-07-09T14:17:17.881-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465837_123 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.887-0400 m31200| 2015-07-09T14:17:17.887-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.887-0400 m31200| 2015-07-09T14:17:17.887-0400 I 
COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.890-0400 m31200| 2015-07-09T14:17:17.890-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_271 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.896-0400 m31200| 2015-07-09T14:17:17.895-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465837_122 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.900-0400 m31200| 2015-07-09T14:17:17.899-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465837_123 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.900-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.901-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.901-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.902-0400 m31200| values...., out: "tmp.mrs.coll73_1436465837_123", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:8 reslen:213 locks:{ Global: { acquireCount: { r: 171, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 104 } }, Database: { acquireCount: { r: 27, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 1, w: 14, R: 8, W: 5 }, timeAcquiringMicros: { r: 283, w: 138914, R: 177969, W: 3570 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 847ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.907-0400 m31200| 2015-07-09T14:17:17.905-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_272 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.907-0400 m31200| 2015-07-09T14:17:17.905-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_272 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.909-0400 m31200| 2015-07-09T14:17:17.909-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_272 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.912-0400 m31200| 2015-07-09T14:17:17.911-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465837_121 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.917-0400 m31200| 2015-07-09T14:17:17.917-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_270 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.917-0400 m31200| 2015-07-09T14:17:17.917-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_270 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.918-0400 m31200| 2015-07-09T14:17:17.918-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465837_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.922-0400 m31200| 2015-07-09T14:17:17.920-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465837_122 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.923-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.923-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.923-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.924-0400 m31200| values...., out: "tmp.mrs.coll73_1436465837_122", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:14 reslen:213 locks:{ Global: 
{ acquireCount: { r: 183, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5375, W: 273 } }, Database: { acquireCount: { r: 27, w: 66, R: 26, W: 11 }, acquireWaitCount: { r: 6, w: 8, R: 8, W: 7 }, timeAcquiringMicros: { r: 94852, w: 56687, R: 50131, W: 84143 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 868ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.927-0400 m31200| 2015-07-09T14:17:17.927-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.927-0400 m31200| 2015-07-09T14:17:17.927-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.928-0400 m31200| 2015-07-09T14:17:17.928-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_274 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.928-0400 m31200| 2015-07-09T14:17:17.928-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_270 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.933-0400 m31200| 2015-07-09T14:17:17.933-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465837_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.939-0400 m31200| 2015-07-09T14:17:17.938-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.939-0400 m31200| 2015-07-09T14:17:17.939-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.944-0400 m31200| 2015-07-09T14:17:17.944-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_273 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.953-0400 m31200| 2015-07-09T14:17:17.952-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465837_146 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.953-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.953-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.953-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.954-0400 m31200| values...., out: "tmp.mrs.coll73_1436465837_146", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 10025, w: 5619, W: 6355 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 8, w: 7, R: 9, W: 9 }, timeAcquiringMicros: { r: 54773, w: 45160, R: 56282, W: 96407 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 875ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.961-0400 m31100| 2015-07-09T14:17:17.960-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465837_121 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.963-0400 m31200| 2015-07-09T14:17:17.960-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465837_121 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.963-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.963-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.963-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.963-0400 m31200| values...., out: "tmp.mrs.coll73_1436465837_121", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:213 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 10816, w: 14966, W: 72 } }, Database: { acquireCount: { r: 27, w: 66, R: 21, W: 11 }, acquireWaitCount: { r: 7, w: 12, R: 7, W: 9 }, timeAcquiringMicros: { r: 14443, w: 70527, R: 42935, W: 168443 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 911ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.966-0400 m31100| 2015-07-09T14:17:17.966-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_446
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.967-0400 m31100| 2015-07-09T14:17:17.966-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_446
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.967-0400 m31200| 2015-07-09T14:17:17.966-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465837_145 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.967-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.967-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.967-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.968-0400 m31200| values...., out: "tmp.mrs.coll73_1436465837_145", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 32829 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 8, w: 10, R: 9, W: 5 }, timeAcquiringMicros: { r: 20103, w: 169054, R: 48167, W: 39525 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 891ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.973-0400 m31100| 2015-07-09T14:17:17.973-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_446
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.985-0400 m31100| 2015-07-09T14:17:17.984-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465837_121 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.985-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.985-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.985-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.986-0400 m31100| values...., out: "tmp.mrs.coll73_1436465837_121", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:16 reslen:213 locks:{ Global: { acquireCount: { r: 185, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 2777 } }, Database: { acquireCount: { r: 27, w: 66, R: 27, W: 11 }, acquireWaitCount: { r: 2, w: 6, R: 3, W: 9 }, timeAcquiringMicros: { r: 17025, w: 40952, R: 53793, W: 80484 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 937ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.986-0400 m31100| 2015-07-09T14:17:17.985-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465837_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.990-0400 m31100| 2015-07-09T14:17:17.989-0400 I SHARDING [conn175] ChunkManager: time to load chunks for db73.coll73: 1ms sequenceNumber: 5 version: 2|5||559ebaabca4787b9985d1f02 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.991-0400 m31100| 2015-07-09T14:17:17.991-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64081 #197 (119 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.994-0400 m31200| 2015-07-09T14:17:17.993-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64082 #157 (100 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.994-0400 m31100| 2015-07-09T14:17:17.994-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_448
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.994-0400 m31100| 2015-07-09T14:17:17.994-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_448
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:17.999-0400 m31100| 2015-07-09T14:17:17.998-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_448
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.002-0400 m31100| 2015-07-09T14:17:18.001-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465837_123 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.003-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.003-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.003-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.005-0400 m31100| values...., out: "tmp.mrs.coll73_1436465837_123", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:15 reslen:213 locks:{ Global: { acquireCount: { r: 183, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 7932, W: 9055 } }, Database: { acquireCount: { r: 27, w: 66, R: 26, W: 11 }, acquireWaitCount: { r: 4, w: 9, R: 5, W: 5 }, timeAcquiringMicros: { r: 3125, w: 80816, R: 129398, W: 3728 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 951ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.005-0400 m31100| 2015-07-09T14:17:18.005-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465837_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.009-0400 m31100| 2015-07-09T14:17:18.009-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465837_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.012-0400 m31100| 2015-07-09T14:17:18.012-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_450
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.012-0400 m31100| 2015-07-09T14:17:18.012-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_450
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.016-0400 m31200| 2015-07-09T14:17:18.016-0400 I COMMAND [conn64] CMD: drop db73.tmp.mrs.coll73_1436465837_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.017-0400 m31201| 2015-07-09T14:17:18.016-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465837_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.018-0400 m31200| 2015-07-09T14:17:18.018-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_275
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.021-0400 m31100| 2015-07-09T14:17:18.020-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_450
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.022-0400 m31100| 2015-07-09T14:17:18.022-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_451
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.024-0400 m31100| 2015-07-09T14:17:18.023-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465837_145 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.025-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.025-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.026-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.029-0400 m31100| values...., out: "tmp.mrs.coll73_1436465837_145", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:20 reslen:213 locks:{ Global: { acquireCount: { r: 193, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 24242, W: 52 } }, Database: { acquireCount: { r: 27, w: 66, R: 31, W: 11 }, acquireWaitCount: { r: 8, w: 11, R: 2, W: 9 }, timeAcquiringMicros: { r: 42125, w: 63267, R: 7464, W: 91080 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 960ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.029-0400 m31102| 2015-07-09T14:17:18.029-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465837_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.030-0400 m31100| 2015-07-09T14:17:18.029-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465837_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.034-0400 m31100| 2015-07-09T14:17:18.033-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465837_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.034-0400 m31202| 2015-07-09T14:17:18.033-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465837_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.037-0400 m31101| 2015-07-09T14:17:18.037-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465837_121
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.039-0400 m31100| 2015-07-09T14:17:18.039-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_447
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.040-0400 m31100| 2015-07-09T14:17:18.040-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_447
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.070-0400 m31200| 2015-07-09T14:17:18.069-0400 I COMMAND [conn64] CMD: drop db73.tmp.mrs.coll73_1436465837_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.072-0400 m31101| 2015-07-09T14:17:18.071-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465837_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.072-0400 m31102| 2015-07-09T14:17:18.072-0400 I COMMAND [repl writer worker 0] CMD: drop db73.tmp.mrs.coll73_1436465837_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.073-0400 m31100| 2015-07-09T14:17:18.073-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_447
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.079-0400 m31200| 2015-07-09T14:17:18.079-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_276
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.080-0400 m31202| 2015-07-09T14:17:18.079-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465837_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.080-0400 m31100| 2015-07-09T14:17:18.080-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465837_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.082-0400 m31201| 2015-07-09T14:17:18.081-0400 I COMMAND [repl writer worker 1] CMD: drop db73.tmp.mrs.coll73_1436465837_123
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.090-0400 m31100| 2015-07-09T14:17:18.090-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465837_122 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.091-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.091-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.091-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.092-0400 m31100| values...., out: "tmp.mrs.coll73_1436465837_122", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:18 reslen:213 locks:{ Global: { acquireCount: { r: 189, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 21597, W: 86 } }, Database: { acquireCount: { r: 27, w: 66, R: 29, W: 11 }, acquireWaitCount: { r: 10, w: 12, R: 3, W: 9 }, timeAcquiringMicros: { r: 39906, w: 58559, R: 62688, W: 71085 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1041ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.102-0400 m31100| 2015-07-09T14:17:18.102-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_452
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.104-0400 m31200| 2015-07-09T14:17:18.103-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465837_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.112-0400 m31102| 2015-07-09T14:17:18.112-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465837_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.116-0400 m31101| 2015-07-09T14:17:18.115-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465837_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.116-0400 m31100| 2015-07-09T14:17:18.116-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465837_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.117-0400 m31202| 2015-07-09T14:17:18.116-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465837_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.119-0400 m31201| 2015-07-09T14:17:18.119-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465837_145
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.120-0400 m31200| 2015-07-09T14:17:18.120-0400 I COMMAND [conn64] CMD: drop db73.tmp.mrs.coll73_1436465837_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.121-0400 m31100| 2015-07-09T14:17:18.121-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_453
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.122-0400 m31102| 2015-07-09T14:17:18.122-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465837_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.123-0400 m31101| 2015-07-09T14:17:18.122-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465837_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.125-0400 m31200| 2015-07-09T14:17:18.125-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_277
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.129-0400 m31202| 2015-07-09T14:17:18.128-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465837_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.129-0400 m31201| 2015-07-09T14:17:18.128-0400 I COMMAND [repl writer worker 14] CMD: drop db73.tmp.mrs.coll73_1436465837_122
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.142-0400 m31200| 2015-07-09T14:17:18.141-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_278
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.147-0400 m31100| 2015-07-09T14:17:18.147-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_454
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.386-0400 m31100| 2015-07-09T14:17:18.385-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465837_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.389-0400 m31100| 2015-07-09T14:17:18.389-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_449
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.389-0400 m31100| 2015-07-09T14:17:18.389-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_449
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.398-0400 m31100| 2015-07-09T14:17:18.397-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_449
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.398-0400 m31200| 2015-07-09T14:17:18.398-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.409-0400 m31200| 2015-07-09T14:17:18.409-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_275
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.410-0400 m31200| 2015-07-09T14:17:18.410-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_275
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.427-0400 m31200| 2015-07-09T14:17:18.427-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_275
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.430-0400 m31200| 2015-07-09T14:17:18.428-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465838_124 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.430-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.430-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.430-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.431-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_124", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 10456 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 13, R: 10, W: 5 }, timeAcquiringMicros: { w: 152602, R: 26256, W: 16788 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 410ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.434-0400 m31100| 2015-07-09T14:17:18.433-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465837_146 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.434-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.434-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.434-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.435-0400 m31100| values...., out: "tmp.mrs.coll73_1436465837_146", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:21 reslen:213 locks:{ Global: { acquireCount: { r: 195, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 6973, w: 35618, W: 16544 } }, Database: { acquireCount: { r: 27, w: 66, R: 32, W: 11 }, acquireWaitCount: { r: 14, w: 29, R: 6, W: 8 }, timeAcquiringMicros: { r: 77348, w: 358184, R: 19375, W: 95322 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1372ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.451-0400 m31200| 2015-07-09T14:17:18.447-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.702-0400 m31200| 2015-07-09T14:17:18.454-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_276
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.703-0400 m31200| 2015-07-09T14:17:18.454-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_276
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.703-0400 m31200| 2015-07-09T14:17:18.457-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_276
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.704-0400 m31200| 2015-07-09T14:17:18.458-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465838_125 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.704-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.705-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.705-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.705-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_125", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 16550, W: 37 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 11, R: 12, W: 9 }, timeAcquiringMicros: { r: 67, w: 51261, R: 82969, W: 21180 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 379ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.706-0400 m31200| 2015-07-09T14:17:18.462-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.706-0400 m31200| 2015-07-09T14:17:18.470-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_278
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.706-0400 m31200| 2015-07-09T14:17:18.471-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_278
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.706-0400 m31200| 2015-07-09T14:17:18.472-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_278
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.707-0400 m31200| 2015-07-09T14:17:18.473-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465838_126 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.707-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.707-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.707-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.708-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_126", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 16604, W: 323 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 4, w: 8, R: 12, W: 8 }, timeAcquiringMicros: { r: 15075, w: 44519, R: 25977, W: 73130 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 344ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.708-0400 m31200| 2015-07-09T14:17:18.480-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.708-0400 m31200| 2015-07-09T14:17:18.489-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_277
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.708-0400 m31200| 2015-07-09T14:17:18.489-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_277
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.708-0400 m31200| 2015-07-09T14:17:18.491-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_277
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.708-0400 m31200| 2015-07-09T14:17:18.491-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465838_147 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.708-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.709-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.709-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.709-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_147", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 14097, w: 14575 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 3, w: 10, R: 12, W: 5 }, timeAcquiringMicros: { r: 2531, w: 34350, R: 52827, W: 50674 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 374ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.709-0400 m31100| 2015-07-09T14:17:18.618-0400 I COMMAND [conn185] command db73.$cmd command: mapreduce.shardedfinish { mapreduce.shardedfinish: { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.709-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.709-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.709-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.710-0400 m31100| values...., finalize: function finalizer(key, reducedValue) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.710-0400 m31100| return reducedValue;
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.710-0400 m31100| }, out: { inline: 1.0 } }, inputDB: "db73", shardedOutputCollection: "tmp.mrs.coll73_1436465837_146", shards: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { result: "tmp.mrs.coll73_1436465837_146", timeMillis: 1327, counts: { input: 971, emit: 971, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465838000|39, electionId: ObjectId('559eb5880000000000000000') } }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { result: "tmp.mrs.coll73_1436465837_146", timeMillis: 849, counts: { input: 1029, emit: 1029, reduce: 80, output: 20 }, ok: 1.0, $gleStats: { lastOpTime: Timestamp 1436465837000|102, electionId: ObjectId('559eb5910000000000000000') } } }, shardCounts: { test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102: { input: 971, emit: 971, reduce: 80, output: 20 }, test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202: { input: 1029, emit: 1029, reduce: 80, output: 20 } }, counts: { emit: 2000, input: 2000, output: 40, reduce: 160 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:20897 locks:{} protocol:op_command 183ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31100| 2015-07-09T14:17:18.619-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465837_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31200| 2015-07-09T14:17:18.627-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465837_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31101| 2015-07-09T14:17:18.627-0400 I COMMAND [repl writer worker 14] CMD: drop db73.tmp.mrs.coll73_1436465837_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31102| 2015-07-09T14:17:18.629-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465837_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31202| 2015-07-09T14:17:18.631-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465837_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31200| 2015-07-09T14:17:18.631-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_279
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31201| 2015-07-09T14:17:18.631-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465837_146
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.711-0400 m31100| 2015-07-09T14:17:18.654-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_455
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.777-0400 m31200| 2015-07-09T14:17:18.777-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.782-0400 m31200| 2015-07-09T14:17:18.782-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_279
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.783-0400 m31200| 2015-07-09T14:17:18.783-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_279
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.785-0400 m31200| 2015-07-09T14:17:18.785-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_279
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.786-0400 m31200| 2015-07-09T14:17:18.785-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465838_148 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.786-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.786-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.786-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.786-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_148", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 154ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.792-0400 m31100| 2015-07-09T14:17:18.792-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.798-0400 m31100| 2015-07-09T14:17:18.797-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_452
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.798-0400 m31100| 2015-07-09T14:17:18.797-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_452
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.798-0400 m31100| 2015-07-09T14:17:18.797-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.804-0400 m31100| 2015-07-09T14:17:18.804-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_451
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.805-0400 m31100| 2015-07-09T14:17:18.804-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_451
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.815-0400 m31100| 2015-07-09T14:17:18.813-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_451
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.820-0400 m31100| 2015-07-09T14:17:18.820-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_452
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.829-0400 m31100| 2015-07-09T14:17:18.828-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465838_124 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.830-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.830-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.830-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.832-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_124", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 4234, w: 9806, W: 12763 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 2, w: 26, R: 13, W: 8 }, timeAcquiringMicros: { r: 10825, w: 185859, R: 106574, W: 29927 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 810ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.835-0400 m31100| 2015-07-09T14:17:18.829-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465838_125 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.836-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.836-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.836-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.837-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_125", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:12 reslen:213 locks:{ Global: { acquireCount: { r: 175, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 446, w: 7476, W: 6301 } }, Database: { acquireCount: { r: 26, w: 66, R: 23, W: 11 }, acquireWaitCount: { r: 2, w: 21, R: 15, W: 9 }, timeAcquiringMicros: { r: 16478, w: 77915, R: 105763, W: 53284 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 750ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.837-0400 m31100| 2015-07-09T14:17:18.831-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.838-0400 m31100| 2015-07-09T14:17:18.838-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_454
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.838-0400 m31100| 2015-07-09T14:17:18.838-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_454
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.840-0400 m31100| 2015-07-09T14:17:18.839-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_454
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.843-0400 m31100| 2015-07-09T14:17:18.842-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.843-0400 m31100| 2015-07-09T14:17:18.843-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465838_126 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.844-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.844-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.844-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.845-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_126", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:11 reslen:213 locks:{ Global: { acquireCount: { r: 173, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 15627, w: 17867, W: 88 } }, Database: { acquireCount: { r: 26, w: 66, R: 22, W: 11 }, acquireWaitCount: { r: 5, w: 19, R: 16, W: 7 }, timeAcquiringMicros: { r: 18828, w: 68918, R: 47124, W: 84778 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 713ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.853-0400 m31100| 2015-07-09T14:17:18.851-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_453
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.854-0400 m31100| 2015-07-09T14:17:18.852-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_453
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.854-0400 m31100| 2015-07-09T14:17:18.852-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_453
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.854-0400 m31100| 2015-07-09T14:17:18.853-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465838_147 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.855-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.855-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.855-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.856-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_147", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:9 reslen:213 locks:{ Global: { acquireCount: { r: 169, w: 74, W: 3 }, acquireWaitCount: { r: 3 }, timeAcquiringMicros: { r: 34686 } }, Database: { acquireCount: { r: 26, w: 66, R: 20, W: 11 }, acquireWaitCount: { r: 8, w: 21, R: 17, W: 5 }, timeAcquiringMicros: { r: 26259, w: 51451, R: 57548, W: 87637 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 736ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.886-0400 m31100| 2015-07-09T14:17:18.886-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.888-0400 m31200| 2015-07-09T14:17:18.888-0400 I COMMAND [conn64] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.889-0400 m31101| 2015-07-09T14:17:18.889-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.890-0400 m31200| 2015-07-09T14:17:18.890-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.892-0400 m31100| 2015-07-09T14:17:18.889-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.893-0400 m31201| 2015-07-09T14:17:18.891-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.893-0400 m31102| 2015-07-09T14:17:18.892-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.893-0400 m31202| 2015-07-09T14:17:18.893-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465838_125
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.894-0400 m31100| 2015-07-09T14:17:18.893-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.894-0400 m31102| 2015-07-09T14:17:18.894-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.895-0400 m31100| 2015-07-09T14:17:18.895-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_456
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.896-0400 m31200| 2015-07-09T14:17:18.895-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.896-0400 m31100| 2015-07-09T14:17:18.895-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.896-0400 m31101| 2015-07-09T14:17:18.896-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.899-0400 m31202| 2015-07-09T14:17:18.897-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.899-0400 m31102| 2015-07-09T14:17:18.898-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.900-0400 m31200| 2015-07-09T14:17:18.899-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_281
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.900-0400 m31101| 2015-07-09T14:17:18.899-0400 I COMMAND [repl writer worker 9] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.900-0400 m31201| 2015-07-09T14:17:18.899-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465838_124
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.901-0400 m31200| 2015-07-09T14:17:18.900-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_280
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.902-0400 m31202| 2015-07-09T14:17:18.902-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.904-0400 m31201| 2015-07-09T14:17:18.903-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465838_147
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.919-0400 m31200| 2015-07-09T14:17:18.919-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.920-0400 m31100| 2015-07-09T14:17:18.919-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_457
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.920-0400 m31100| 2015-07-09T14:17:18.919-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_458
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.921-0400 m31102| 2015-07-09T14:17:18.920-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.922-0400 m31101| 2015-07-09T14:17:18.921-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.925-0400 m31200| 2015-07-09T14:17:18.925-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_282
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.927-0400 m31201| 2015-07-09T14:17:18.926-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.927-0400 m31202| 2015-07-09T14:17:18.926-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465838_126
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.989-0400 m31100| 2015-07-09T14:17:18.989-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_459
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:18.989-0400 m31200| 2015-07-09T14:17:18.989-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_283
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.348-0400 m31200| 2015-07-09T14:17:19.347-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.350-0400 m31100| 2015-07-09T14:17:19.350-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.353-0400 m31200| 2015-07-09T14:17:19.352-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_282
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.353-0400 m31200| 2015-07-09T14:17:19.352-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_282
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.354-0400 m31200| 2015-07-09T14:17:19.353-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_282
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.355-0400 m31100| 2015-07-09T14:17:19.355-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_455
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.355-0400 m31100| 2015-07-09T14:17:19.355-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_455
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.356-0400 m31200| 2015-07-09T14:17:19.355-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465838_149 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.356-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.356-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.356-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.357-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_149", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 89 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 2, w: 6, R: 5, W: 8 }, timeAcquiringMicros: { r: 19475, w: 32443, R: 40849, W: 114622 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 455ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.358-0400 m31100| 2015-07-09T14:17:19.358-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_455
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.360-0400 m31100| 2015-07-09T14:17:19.360-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465838_148 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.361-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.361-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.361-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.362-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_148", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4356, W: 94 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { w: 30, R: 10, W: 9 }, timeAcquiringMicros: { w: 435016, R: 35874, W: 34657 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 729ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.363-0400 m31200| 2015-07-09T14:17:19.363-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.372-0400 m31200| 2015-07-09T14:17:19.371-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_281
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.372-0400 m31200| 2015-07-09T14:17:19.372-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_281
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.376-0400 m31200| 2015-07-09T14:17:19.374-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_281
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.378-0400 m31200| 2015-07-09T14:17:19.377-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465838_128 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.379-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.379-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.379-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.379-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_128", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 4608, W: 452 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 5, w: 9, R: 9, W: 6 }, timeAcquiringMicros: { r: 42152, w: 38842, R: 84521, W: 4289 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 478ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.383-0400 m31100| 2015-07-09T14:17:19.383-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.394-0400 m31200| 2015-07-09T14:17:19.393-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.394-0400 m31200| 2015-07-09T14:17:19.394-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.396-0400 m31100| 2015-07-09T14:17:19.395-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.396-0400 m31101| 2015-07-09T14:17:19.396-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.398-0400 m31102| 2015-07-09T14:17:19.397-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.400-0400 m31200| 2015-07-09T14:17:19.399-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_280
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.402-0400 m31200| 2015-07-09T14:17:19.401-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_280
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.402-0400 m31100| 2015-07-09T14:17:19.402-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_456
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.403-0400 m31100| 2015-07-09T14:17:19.402-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_456
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.408-0400 m31100| 2015-07-09T14:17:19.407-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_456
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.409-0400 m31200| 2015-07-09T14:17:19.409-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_280
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.410-0400 m31100| 2015-07-09T14:17:19.409-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465838_127 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.410-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.410-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.410-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.411-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_127", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 5580, W: 724 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 4, w: 9, R: 13, W: 4 }, timeAcquiringMicros: { r: 10665, w: 51747, R: 142269, W: 3489 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 516ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.412-0400 m31100| 2015-07-09T14:17:19.410-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_460
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.412-0400 m31200| 2015-07-09T14:17:19.410-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_284
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.412-0400 m31100| 2015-07-09T14:17:19.412-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.412-0400 m31200| 2015-07-09T14:17:19.410-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465838_127 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.413-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.413-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.413-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.414-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_127", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 14148, W: 1103 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 4, w: 7, R: 9, W: 9 }, timeAcquiringMicros: { r: 32980, w: 10220, R: 63710, W: 61193 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 518ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.416-0400 m31202| 2015-07-09T14:17:19.415-0400 I COMMAND [repl writer worker 3] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.421-0400 m31201| 2015-07-09T14:17:19.421-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465838_148
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.424-0400 m31100| 2015-07-09T14:17:19.424-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_458
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.424-0400 m31100| 2015-07-09T14:17:19.424-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_458
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.426-0400 m31100| 2015-07-09T14:17:19.425-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_458
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.443-0400 m31200| 2015-07-09T14:17:19.443-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465838_129
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.448-0400 m31100| 2015-07-09T14:17:19.447-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465838_128 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.448-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.448-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.448-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.449-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_128", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { w: 2, W: 1 }, timeAcquiringMicros: { w: 13640, W: 636 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 4, w: 12, R: 15, W: 7 }, timeAcquiringMicros: { r: 43830, w: 67765, R: 55839, W: 71921 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 546ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.452-0400 m31200| 2015-07-09T14:17:19.451-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_283
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.454-0400 m31200| 2015-07-09T14:17:19.453-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_283
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.458-0400 m31100| 2015-07-09T14:17:19.458-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.475-0400 m31200| 2015-07-09T14:17:19.474-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.484-0400 m31200| 2015-07-09T14:17:19.483-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_283
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.484-0400 m31100| 2015-07-09T14:17:19.484-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_461
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.489-0400 m31200| 2015-07-09T14:17:19.489-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_285
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.492-0400 m31102| 2015-07-09T14:17:19.492-0400 I COMMAND [repl writer worker 14] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.496-0400 m31200| 2015-07-09T14:17:19.495-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465838_129 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.496-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.496-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.496-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.497-0400 m31200| values...., out: "tmp.mrs.coll73_1436465838_129", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 20994, W: 512 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 9, w: 7, R: 4, W: 9 }, timeAcquiringMicros: { r: 98689, w: 65080, R: 14652, W: 93388 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 569ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.502-0400 m31101| 2015-07-09T14:17:19.501-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.504-0400 m31100| 2015-07-09T14:17:19.504-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.505-0400 m31201| 2015-07-09T14:17:19.504-0400 I COMMAND [repl writer worker 0] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.506-0400 m31202| 2015-07-09T14:17:19.506-0400 I COMMAND [repl writer worker 9] CMD: drop db73.tmp.mrs.coll73_1436465838_127
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.527-0400 m31200| 2015-07-09T14:17:19.527-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.530-0400 m31101| 2015-07-09T14:17:19.529-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.532-0400 m31102| 2015-07-09T14:17:19.529-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.555-0400 m31201| 2015-07-09T14:17:19.555-0400 I COMMAND [repl writer worker 9] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.560-0400 m31200| 2015-07-09T14:17:19.559-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_286
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.561-0400 m31100| 2015-07-09T14:17:19.561-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.568-0400 m31100| 2015-07-09T14:17:19.568-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_457
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.568-0400 m31100| 2015-07-09T14:17:19.568-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_457
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.571-0400 m31100| 2015-07-09T14:17:19.569-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_457
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.585-0400 m31100| 2015-07-09T14:17:19.583-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_462
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.590-0400 m31100| 2015-07-09T14:17:19.589-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465838_149 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.590-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.590-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.590-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.591-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_149", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 20216, w: 5641, W: 26547 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 12, w: 19, R: 15, W: 7 }, timeAcquiringMicros: { r: 122389, w: 90887, R: 44165, W: 65159 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 689ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.644-0400 m31100| 2015-07-09T14:17:19.643-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.649-0400 m31200| 2015-07-09T14:17:19.648-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.651-0400 m31102| 2015-07-09T14:17:19.651-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.655-0400 m31101| 2015-07-09T14:17:19.655-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.677-0400 m31202| 2015-07-09T14:17:19.676-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465838_128
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.684-0400 m31200| 2015-07-09T14:17:19.684-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_287
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.697-0400 m31100| 2015-07-09T14:17:19.696-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_463
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.707-0400 m31201| 2015-07-09T14:17:19.707-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.713-0400 m31202| 2015-07-09T14:17:19.713-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465838_149
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.943-0400 m31200| 2015-07-09T14:17:19.943-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465839_130
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.950-0400 m31200| 2015-07-09T14:17:19.950-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_285
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.950-0400 m31200| 2015-07-09T14:17:19.950-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_285
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.951-0400 m31200| 2015-07-09T14:17:19.951-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_285
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.952-0400 m31200| 2015-07-09T14:17:19.952-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465839_130 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.952-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.952-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.952-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.953-0400 m31200| values...., out: "tmp.mrs.coll73_1436465839_130", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 986 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 15, R: 12, W: 6 }, timeAcquiringMicros: { r: 22497, w: 140403, R: 82046, W: 10045 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 468ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.958-0400 m31200| 2015-07-09T14:17:19.958-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465839_150
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.968-0400 m31200| 2015-07-09T14:17:19.967-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_284
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.968-0400 m31200| 2015-07-09T14:17:19.968-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_284
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.972-0400 m31200| 2015-07-09T14:17:19.971-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_284
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.973-0400 m31200| 2015-07-09T14:17:19.972-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465839_150 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.973-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.973-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.974-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.974-0400 m31200| values...., out: "tmp.mrs.coll73_1436465839_150", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 7001, w: 8591, W: 6649 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 22, R: 12, W: 8 }, timeAcquiringMicros: { r: 5121, w: 216413, R: 101338, W: 8264 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 564ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.981-0400 m31200| 2015-07-09T14:17:19.981-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465839_131
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.989-0400 m31200| 2015-07-09T14:17:19.989-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_286
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.990-0400 m31200| 2015-07-09T14:17:19.989-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_286
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.991-0400 m31200| 2015-07-09T14:17:19.990-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_286
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.994-0400 m31200| 2015-07-09T14:17:19.993-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465839_131 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.994-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.994-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.994-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:19.995-0400 m31200| values...., out: "tmp.mrs.coll73_1436465839_131", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 23265 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 15, R: 11, W: 7 }, timeAcquiringMicros: { r: 5709, w: 76906, R: 80768, W: 41371 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 437ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.003-0400 m31200| 2015-07-09T14:17:20.003-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465839_151
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.009-0400 m31200| 2015-07-09T14:17:20.009-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_287
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.009-0400 m31200| 2015-07-09T14:17:20.009-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_287
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.010-0400 m31200| 2015-07-09T14:17:20.010-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_287
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.011-0400 m31200| 2015-07-09T14:17:20.010-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465839_151 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.011-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.011-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.011-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.012-0400 m31200| values...., out: "tmp.mrs.coll73_1436465839_151", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1 }, timeAcquiringMicros: { r: 10204, w: 4782 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 6, R: 11, W: 5 }, timeAcquiringMicros: { w: 26363, R: 28190, W: 65542 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 326ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.033-0400 m31100| 2015-07-09T14:17:20.033-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465838_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.042-0400 m31100| 2015-07-09T14:17:20.041-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_459 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.042-0400 m31100| 2015-07-09T14:17:20.041-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_459 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.043-0400 m31100| 2015-07-09T14:17:20.042-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_459 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.056-0400 m31100| 2015-07-09T14:17:20.055-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465838_129 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.056-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.056-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.056-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.057-0400 m31100| values...., out: "tmp.mrs.coll73_1436465838_129", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 159, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 4, W: 1 }, timeAcquiringMicros: { r: 7888, w: 23799, W: 269867 } }, Database: { acquireCount: { r: 26, w: 66, R: 15, W: 11 }, acquireWaitCount: { r: 17, w: 28, R: 15, W: 7 }, timeAcquiringMicros: { r: 103834, w: 254884, R: 98496, W: 86136 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 1129ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.075-0400 m31100| 2015-07-09T14:17:20.075-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465838_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.079-0400 m31200| 2015-07-09T14:17:20.077-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465838_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.081-0400 m31102| 2015-07-09T14:17:20.080-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465838_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.081-0400 m31200| 2015-07-09T14:17:20.081-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.082-0400 m31202| 2015-07-09T14:17:20.082-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465838_129 
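The stretch above is one full cycle of the concurrency suite's mapReduce workload against the sharded db73.coll73: mongos runs the first pass on each shard primary (m31100 for test-rs0, m31200 for test-rs1) with the internal shardedFirstPass: true flag, each primary materializes its partial result in a db73.tmp.mrs.coll73_<epoch>_<n> collection, and once mongos has merged the partials the tmp.mrs collections are dropped on the primaries and the drops replicate to the secondaries (m31101/m31102 and m31201/m31202 above). The log keeps only the first line of the workload's map and reduce functions, so the mongo-shell sketch below is a hypothetical reconstruction of their shape; the emitted payload, the reduce fold, and the output collection name are assumptions, and shardedFirstPass is added by mongos itself, never passed by the client.

    // Hypothetical reconstruction of the workload's call (mongo shell JS).
    // Only `if (this.hasOwnProperty('key') && this.has...` and `var res = {};`
    // survive in the log above; everything else here is assumed.
    var mapper = function mapper() {
        if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) {
            emit(this.key, { count: 1 }); // assumed payload
        }
    };
    var reducer = function reducer(key, values) {
        var res = {};
        res.count = 0;
        values.forEach(function(v) { res.count += v.count; }); // assumed fold
        return res;
    };
    // Issued against mongos; per shard it becomes the logged
    // { mapreduce: "coll73", ..., out: "tmp.mrs.coll73_<epoch>_<n>", shardedFirstPass: true }.
    db.getSiblingDB('db73').coll73.mapReduce(mapper, reducer, { out: { replace: 'coll73_out' } });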
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.084-0400 m31201| 2015-07-09T14:17:20.083-0400 I COMMAND [repl writer worker 1] CMD: drop db73.tmp.mrs.coll73_1436465838_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.090-0400 m31100| 2015-07-09T14:17:20.090-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_464 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.163-0400 m31100| 2015-07-09T14:17:20.162-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465839_150 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.169-0400 m31100| 2015-07-09T14:17:20.168-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_460 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.170-0400 m31100| 2015-07-09T14:17:20.168-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_460 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.186-0400 m31100| 2015-07-09T14:17:20.185-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_460 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.199-0400 m31100| 2015-07-09T14:17:20.198-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465839_150 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.199-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.199-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.199-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.199-0400 m31100| values...., out: "tmp.mrs.coll73_1436465839_150", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 4, W: 1 }, timeAcquiringMicros: { r: 2038, w: 269369, W: 31974 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 22, R: 11, W: 9 }, timeAcquiringMicros: { r: 3379, w: 204199, R: 51928, W: 37695 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 790ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.225-0400 m31100| 2015-07-09T14:17:20.224-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465839_150 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.231-0400 m31101| 2015-07-09T14:17:20.230-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465838_129 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.232-0400 m31200| 2015-07-09T14:17:20.231-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.234-0400 m31200| 2015-07-09T14:17:20.233-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465839_150 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.236-0400 m31102| 2015-07-09T14:17:20.235-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465839_150 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.238-0400 m31200| 2015-07-09T14:17:20.237-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.238-0400 m31200| 2015-07-09T14:17:20.238-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.239-0400 m31200| 2015-07-09T14:17:20.239-0400 I COMMAND 
[conn35] CMD: drop db73.tmp.mr.coll73_288 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.242-0400 m31200| 2015-07-09T14:17:20.240-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465840_132 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.243-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.243-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.243-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.243-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_132", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 517 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.243-0400 m31200| 2015-07-09T14:17:20.241-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.245-0400 m31100| 2015-07-09T14:17:20.245-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_465 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.246-0400 m31202| 2015-07-09T14:17:20.246-0400 I COMMAND [repl writer worker 14] CMD: drop db73.tmp.mrs.coll73_1436465839_150 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.254-0400 m31201| 2015-07-09T14:17:20.254-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465839_150 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.266-0400 m31101| 2015-07-09T14:17:20.266-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465839_150 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.311-0400 m31100| 2015-07-09T14:17:20.310-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465839_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.314-0400 m31100| 2015-07-09T14:17:20.314-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_461 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.315-0400 m31100| 2015-07-09T14:17:20.314-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_461 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.317-0400 m31100| 2015-07-09T14:17:20.317-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_461 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.345-0400 m31100| 2015-07-09T14:17:20.345-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465839_130 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.346-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.346-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.346-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.346-0400 m31100| values...., out: "tmp.mrs.coll73_1436465839_130", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 
4, w: 2, W: 1 }, timeAcquiringMicros: { r: 254637, w: 16439, W: 8262 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 8, w: 24, R: 10, W: 7 }, timeAcquiringMicros: { r: 81412, w: 127689, R: 80341, W: 48325 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 862ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.364-0400 m31100| 2015-07-09T14:17:20.364-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465839_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.371-0400 m31200| 2015-07-09T14:17:20.371-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465839_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.374-0400 m31101| 2015-07-09T14:17:20.374-0400 I COMMAND [repl writer worker 14] CMD: drop db73.tmp.mrs.coll73_1436465839_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.375-0400 m31102| 2015-07-09T14:17:20.375-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465839_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.380-0400 m31200| 2015-07-09T14:17:20.379-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.381-0400 m31202| 2015-07-09T14:17:20.380-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465839_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.383-0400 m31201| 2015-07-09T14:17:20.383-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465839_130 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.384-0400 m31100| 2015-07-09T14:17:20.383-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_466 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.467-0400 m31100| 2015-07-09T14:17:20.466-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465839_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.467-0400 m31200| 2015-07-09T14:17:20.467-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.472-0400 m31100| 2015-07-09T14:17:20.470-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_462 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.472-0400 m31100| 2015-07-09T14:17:20.470-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_462 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.474-0400 m31100| 2015-07-09T14:17:20.473-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_462 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.475-0400 m31200| 2015-07-09T14:17:20.474-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.476-0400 m31200| 2015-07-09T14:17:20.476-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.477-0400 m31200| 2015-07-09T14:17:20.477-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_289 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.480-0400 m31200| 2015-07-09T14:17:20.477-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465840_152 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.480-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.480-0400 m31200| var res = {}; 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.480-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.481-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_152", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 9, R: 1 }, timeAcquiringMicros: { w: 64108, R: 1498 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 237ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.491-0400 m31100| 2015-07-09T14:17:20.490-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465839_131 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.492-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.492-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.492-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.492-0400 m31100| values...., out: "tmp.mrs.coll73_1436465839_131", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 3, W: 1 }, timeAcquiringMicros: { r: 18576, w: 42980, W: 9602 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 12, w: 26, R: 12, W: 9 }, timeAcquiringMicros: { r: 59954, w: 203423, R: 312555, W: 49761 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 932ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.495-0400 m31100| 2015-07-09T14:17:20.495-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465839_151 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.500-0400 m31100| 2015-07-09T14:17:20.499-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_463 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.500-0400 m31100| 2015-07-09T14:17:20.500-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_463 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.504-0400 m31100| 2015-07-09T14:17:20.504-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465839_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.531-0400 m31200| 2015-07-09T14:17:20.530-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.533-0400 m31100| 2015-07-09T14:17:20.533-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_463 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.534-0400 m31200| 2015-07-09T14:17:20.534-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465839_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.534-0400 m31101| 2015-07-09T14:17:20.534-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465839_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.537-0400 m31200| 2015-07-09T14:17:20.535-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.537-0400 m31200| 2015-07-09T14:17:20.536-0400 I COMMAND [conn32] CMD: 
drop db73.tmp.mr.coll73_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.538-0400 m31200| 2015-07-09T14:17:20.538-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_290 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.539-0400 m31200| 2015-07-09T14:17:20.538-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465840_133 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.539-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.539-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.539-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.540-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_133", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { R: 6, W: 4 }, timeAcquiringMicros: { R: 2960, W: 3010 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 159ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.541-0400 m31200| 2015-07-09T14:17:20.539-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.541-0400 m31100| 2015-07-09T14:17:20.540-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_467 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.546-0400 m31201| 2015-07-09T14:17:20.545-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465839_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.546-0400 m31100| 2015-07-09T14:17:20.546-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465839_151 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.546-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.547-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.547-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.547-0400 m31100| values...., out: "tmp.mrs.coll73_1436465839_151", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 3, W: 1 }, timeAcquiringMicros: { r: 39708, w: 30934, W: 810 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 9, w: 29, R: 11, W: 9 }, timeAcquiringMicros: { r: 47896, w: 206271, R: 27954, W: 76931 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 862ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.552-0400 m31202| 2015-07-09T14:17:20.552-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465839_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.583-0400 m31100| 2015-07-09T14:17:20.582-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465839_151 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.593-0400 m31200| 
2015-07-09T14:17:20.593-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465839_151 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.595-0400 m31200| 2015-07-09T14:17:20.595-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.597-0400 m31202| 2015-07-09T14:17:20.597-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465839_151 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.602-0400 m31201| 2015-07-09T14:17:20.602-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465839_151 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.605-0400 m31101| 2015-07-09T14:17:20.605-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465839_151 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.629-0400 m31100| 2015-07-09T14:17:20.629-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_468 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.722-0400 m31100| 2015-07-09T14:17:20.721-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.727-0400 m31100| 2015-07-09T14:17:20.727-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_464 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.727-0400 m31100| 2015-07-09T14:17:20.727-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_464 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.735-0400 m31200| 2015-07-09T14:17:20.735-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.743-0400 m31200| 2015-07-09T14:17:20.743-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.743-0400 m31200| 2015-07-09T14:17:20.743-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.747-0400 m31200| 2015-07-09T14:17:20.747-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_291 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.755-0400 m31100| 2015-07-09T14:17:20.755-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_464 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.757-0400 m31100| 2015-07-09T14:17:20.757-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465840_132 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.757-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.757-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.757-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.758-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_132", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 23009, w: 13621, W: 13814 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 11, w: 31, R: 10, W: 8 }, timeAcquiringMicros: { r: 68043, w: 233264, R: 57992, W: 51927 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 675ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:20.777-0400 m31200| 2015-07-09T14:17:20.777-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465840_134 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.777-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.777-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.777-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.778-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_134", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 2, R: 4, W: 3 }, timeAcquiringMicros: { w: 6759, R: 24244, W: 29840 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 237ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.785-0400 m31100| 2015-07-09T14:17:20.785-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.789-0400 m31100| 2015-07-09T14:17:20.789-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_465 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.790-0400 m31100| 2015-07-09T14:17:20.789-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_465 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.791-0400 m31100| 2015-07-09T14:17:20.790-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_465 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.794-0400 m31100| 2015-07-09T14:17:20.794-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465840_152 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.795-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.795-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.795-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.796-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_152", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 7230, w: 23274, W: 86 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 9, w: 27, R: 11, W: 9 }, timeAcquiringMicros: { r: 56899, w: 202025, R: 46835, W: 9840 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 553ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.800-0400 m31102| 2015-07-09T14:17:20.799-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465839_131 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.802-0400 m31100| 2015-07-09T14:17:20.802-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.811-0400 m31100| 2015-07-09T14:17:20.810-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_466 
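Each `command db73.tmp.mrs.* ... protocol:op_query NNNms` entry above is the slow-operation log line for one first-pass mapReduce. The locks document reads per resource (Global, Database, Collection, Metadata, oplog): acquireCount is how often each mode was taken (lowercase r/w are intent-shared/intent-exclusive, uppercase R/W are shared/exclusive), acquireWaitCount is the subset of those acquisitions that blocked, and timeAcquiringMicros is the total wait per mode. For the 1129ms entry earlier in this stretch, the Global waits sum to 7888 + 23799 + 269867 microseconds, about 302 ms, and the Database waits to 103834 + 254884 + 98496 + 86136 microseconds, about 543 ms; lock levels nest, so the sums overlap rather than add cleanly, but they show the latency is dominated by contention between the concurrent passes rather than by the COUNT plan itself. A small helper (hypothetical name, written against the document shape shown above) makes the comparison mechanical:

    // Sum timeAcquiringMicros across modes for each resource in a `locks:` document.
    function lockWaitMillis(locks) {
        var byResource = {};
        Object.keys(locks).forEach(function(resource) {
            var waits = locks[resource].timeAcquiringMicros || {};
            var micros = 0;
            Object.keys(waits).forEach(function(mode) { micros += waits[mode]; });
            byResource[resource] = micros / 1000;
        });
        return byResource;
    }
    // lockWaitMillis({ Global: { timeAcquiringMicros: { r: 7888, w: 23799, W: 269867 } } })
    // --> { "Global" : 301.554 }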
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.811-0400 m31100| 2015-07-09T14:17:20.811-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_466 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.812-0400 m31102| 2015-07-09T14:17:20.812-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465839_151 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.816-0400 m31100| 2015-07-09T14:17:20.815-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_466 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.818-0400 m31100| 2015-07-09T14:17:20.817-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465840_133 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.819-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.819-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.819-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.819-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_133", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2 }, timeAcquiringMicros: { r: 14792, w: 23041 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 16, R: 12, W: 5 }, timeAcquiringMicros: { r: 779, w: 124150, R: 38770, W: 34168 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 438ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.828-0400 m31200| 2015-07-09T14:17:20.827-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.832-0400 m31200| 2015-07-09T14:17:20.832-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.834-0400 m31200| 2015-07-09T14:17:20.833-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.836-0400 m31200| 2015-07-09T14:17:20.836-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_292 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.837-0400 m31100| 2015-07-09T14:17:20.837-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.838-0400 m31200| 2015-07-09T14:17:20.838-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465840_153 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.838-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.838-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.838-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.839-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_153", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 1322 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, 
acquireWaitCount: { w: 2, R: 4, W: 2 }, timeAcquiringMicros: { w: 11393, R: 5548, W: 8841 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 242ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.845-0400 m31200| 2015-07-09T14:17:20.844-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.847-0400 m31101| 2015-07-09T14:17:20.847-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.848-0400 m31201| 2015-07-09T14:17:20.848-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.849-0400 m31200| 2015-07-09T14:17:20.849-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.849-0400 m31202| 2015-07-09T14:17:20.849-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.852-0400 m31100| 2015-07-09T14:17:20.851-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.856-0400 m31100| 2015-07-09T14:17:20.856-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_469 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.858-0400 m31100| 2015-07-09T14:17:20.858-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.868-0400 m31200| 2015-07-09T14:17:20.867-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.868-0400 m31200| 2015-07-09T14:17:20.868-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.870-0400 m31101| 2015-07-09T14:17:20.869-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.872-0400 m31101| 2015-07-09T14:17:20.871-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.886-0400 m31102| 2015-07-09T14:17:20.885-0400 I COMMAND [repl writer worker 1] CMD: drop db73.tmp.mrs.coll73_1436465840_132 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.889-0400 m31102| 2015-07-09T14:17:20.888-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.892-0400 m31200| 2015-07-09T14:17:20.892-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.893-0400 m31200| 2015-07-09T14:17:20.892-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_295 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.900-0400 m31100| 2015-07-09T14:17:20.899-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_471 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.901-0400 m31102| 2015-07-09T14:17:20.901-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.905-0400 m31202| 2015-07-09T14:17:20.905-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:20.906-0400 m31100| 2015-07-09T14:17:20.906-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_470 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.907-0400 m31202| 2015-07-09T14:17:20.907-0400 I COMMAND [repl writer worker 0] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.909-0400 m31201| 2015-07-09T14:17:20.908-0400 I COMMAND [repl writer worker 9] CMD: drop db73.tmp.mrs.coll73_1436465840_152 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:20.911-0400 m31201| 2015-07-09T14:17:20.910-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465840_133 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.194-0400 m31200| 2015-07-09T14:17:21.194-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465840_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.198-0400 m31200| 2015-07-09T14:17:21.197-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.198-0400 m31200| 2015-07-09T14:17:21.197-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.202-0400 m31200| 2015-07-09T14:17:21.202-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_293 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.207-0400 m31200| 2015-07-09T14:17:21.206-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465840_135 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.207-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.207-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.208-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.208-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_135", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 70 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 8, R: 7, W: 4 }, timeAcquiringMicros: { r: 9003, w: 50309, R: 73133, W: 6760 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 357ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.208-0400 m31100| 2015-07-09T14:17:21.207-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.215-0400 m31100| 2015-07-09T14:17:21.215-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_467 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.216-0400 m31100| 2015-07-09T14:17:21.215-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_467 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.218-0400 m31100| 2015-07-09T14:17:21.218-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_467 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.223-0400 m31200| 2015-07-09T14:17:21.223-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465840_136 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.229-0400 m31100| 2015-07-09T14:17:21.228-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465840_134 command: mapReduce { mapreduce: 
"coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.229-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.229-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.230-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.231-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_134", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 6895, W: 1884 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 30, R: 9, W: 7 }, timeAcquiringMicros: { r: 832, w: 361663, R: 18115, W: 25835 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 689ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.233-0400 m31200| 2015-07-09T14:17:21.232-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_295 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.233-0400 m31200| 2015-07-09T14:17:21.232-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_295 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.234-0400 m31200| 2015-07-09T14:17:21.234-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_295 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.239-0400 m31100| 2015-07-09T14:17:21.239-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.240-0400 m31200| 2015-07-09T14:17:21.240-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465840_136 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.241-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.241-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.241-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.241-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_136", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { w: 1, W: 1 }, timeAcquiringMicros: { w: 4118, W: 532 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 2, w: 5, R: 5, W: 7 }, timeAcquiringMicros: { r: 1133, w: 33405, R: 13705, W: 38199 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 348ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.242-0400 m31200| 2015-07-09T14:17:21.242-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465840_154 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.248-0400 m31200| 2015-07-09T14:17:21.247-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.250-0400 m31200| 2015-07-09T14:17:21.247-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.250-0400 m31200| 2015-07-09T14:17:21.249-0400 I COMMAND [conn60] 
CMD: drop db73.tmp.mr.coll73_294 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.251-0400 m31200| 2015-07-09T14:17:21.249-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465840_154 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.251-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.251-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.251-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.252-0400 m31200| values...., out: "tmp.mrs.coll73_1436465840_154", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 10779, w: 3912 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 3, w: 7, R: 7, W: 5 }, timeAcquiringMicros: { r: 11479, w: 22005, R: 30995, W: 43529 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 364ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.255-0400 m31100| 2015-07-09T14:17:21.254-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_468 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.256-0400 m31100| 2015-07-09T14:17:21.254-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_468 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.259-0400 m31100| 2015-07-09T14:17:21.258-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.259-0400 m31100| 2015-07-09T14:17:21.259-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_468 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.262-0400 m31100| 2015-07-09T14:17:21.262-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465840_153 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.262-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.263-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.263-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.263-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_153", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 14743, W: 1107 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 21, R: 11, W: 9 }, timeAcquiringMicros: { r: 26208, w: 285134, R: 32260, W: 42279 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 666ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.264-0400 m31200| 2015-07-09T14:17:21.264-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.265-0400 m31100| 2015-07-09T14:17:21.265-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465840_135 
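Two temporary namespaces recur throughout this section: db73.tmp.mr.coll73_NNN is the per-invocation working collection on a single node, and the same NNN is logged as dropped up to three times, evidently because the cleanup path issues the drop at more than one stage; db73.tmp.mrs.coll73_<epoch>_<n> holds a shard's first-pass output and survives until mongos finishes merging. If a run dies between those points the temp collections can be left behind; a hypothetical sweep for leftovers, using only stock shell helpers, would be:

    // Hypothetical sketch: list any mapReduce temp collections left behind in db73.
    db.getSiblingDB('db73')
      .getCollectionNames()
      .filter(function(name) { return /^tmp\.mrs?\./.test(name); })
      .forEach(function(name) { print('leftover mapReduce temp collection: ' + name); });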
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.268-0400 m31101| 2015-07-09T14:17:21.268-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.270-0400 m31102| 2015-07-09T14:17:21.270-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.272-0400 m31202| 2015-07-09T14:17:21.272-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.273-0400 m31100| 2015-07-09T14:17:21.273-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_469 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.274-0400 m31100| 2015-07-09T14:17:21.273-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_469 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.274-0400 m31200| 2015-07-09T14:17:21.274-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_296 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.275-0400 m31201| 2015-07-09T14:17:21.274-0400 I COMMAND [repl writer worker 1] CMD: drop db73.tmp.mrs.coll73_1436465840_134 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.275-0400 m31100| 2015-07-09T14:17:21.275-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_469 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.276-0400 m31100| 2015-07-09T14:17:21.276-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_472 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.278-0400 m31100| 2015-07-09T14:17:21.278-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465840_135 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.278-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.278-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.278-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.279-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_135", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 27308, W: 46 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 9, R: 11, W: 7 }, timeAcquiringMicros: { r: 23861, w: 40406, R: 112754, W: 23283 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 429ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.293-0400 m31100| 2015-07-09T14:17:21.292-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.298-0400 m31200| 2015-07-09T14:17:21.297-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.306-0400 m31102| 2015-07-09T14:17:21.305-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.308-0400 m31101| 2015-07-09T14:17:21.307-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.313-0400 m31200| 
2015-07-09T14:17:21.312-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_297 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.314-0400 m31100| 2015-07-09T14:17:21.313-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465840_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.331-0400 m31201| 2015-07-09T14:17:21.330-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.337-0400 m31200| 2015-07-09T14:17:21.336-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465840_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.339-0400 m31100| 2015-07-09T14:17:21.338-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_473 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.341-0400 m31202| 2015-07-09T14:17:21.340-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465840_153 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.347-0400 m31102| 2015-07-09T14:17:21.346-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465840_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.351-0400 m31101| 2015-07-09T14:17:21.350-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465840_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.354-0400 m31201| 2015-07-09T14:17:21.353-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465840_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.355-0400 m31202| 2015-07-09T14:17:21.354-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465840_135 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.357-0400 m31100| 2015-07-09T14:17:21.357-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_474 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.363-0400 m31200| 2015-07-09T14:17:21.362-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_298 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.533-0400 m31100| 2015-07-09T14:17:21.532-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465840_136 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.538-0400 m31100| 2015-07-09T14:17:21.537-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_471 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.538-0400 m31100| 2015-07-09T14:17:21.538-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_471 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.580-0400 m31200| 2015-07-09T14:17:21.579-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465841_137 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.580-0400 m31100| 2015-07-09T14:17:21.580-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_471 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.588-0400 m31200| 2015-07-09T14:17:21.588-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_296 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.589-0400 m31200| 2015-07-09T14:17:21.588-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_296 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.589-0400 m31200| 2015-07-09T14:17:21.589-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_296 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.594-0400 m31100| 2015-07-09T14:17:21.593-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465840_136 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.594-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.594-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.594-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.595-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_136", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 8924, w: 25084, W: 10679 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 16, w: 27, R: 12, W: 9 }, timeAcquiringMicros: { r: 123037, w: 167290, R: 56571, W: 86535 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 698ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.612-0400 m31200| 2015-07-09T14:17:21.612-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465841_137 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.612-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.612-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.612-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.613-0400 m31200| values...., out: "tmp.mrs.coll73_1436465841_137", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 6, R: 10, W: 3 }, timeAcquiringMicros: { r: 2058, w: 55758, R: 61939, W: 22404 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 337ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.624-0400 m31100| 2015-07-09T14:17:21.624-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465840_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.635-0400 m31200| 2015-07-09T14:17:21.635-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465840_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.637-0400 m31101| 2015-07-09T14:17:21.636-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465840_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.641-0400 m31200| 2015-07-09T14:17:21.641-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.642-0400 m31102| 2015-07-09T14:17:21.642-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465840_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.644-0400 m31202| 2015-07-09T14:17:21.643-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465840_136
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.645-0400 m31201| 2015-07-09T14:17:21.645-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465840_136
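In the slow-command summaries above, lowercase r/w count intent-shared/intent-exclusive lock acquisitions and uppercase R/W shared/exclusive ones; acquireWaitCount and timeAcquiringMicros record how often and for how long the operation queued. In the 698ms pass on m31100, for example, the Database-level waits alone sum to 123037 + 167290 + 56571 + 86535 = 433433 microseconds, roughly 433ms of the total. A small helper of this shape (hypothetical, not part of the test) totals the waits in one of these locks:{...} documents:

    // locks: the parsed locks:{...} subdocument from a slow-op log line.
    function lockWaitMillis(locks) {
        var micros = 0;
        Object.keys(locks).forEach(function(resource) {
            var waits = locks[resource].timeAcquiringMicros;
            if (!waits) return;
            Object.keys(waits).forEach(function(mode) {
                micros += waits[mode];  // modes: r/w intent, R/W shared/exclusive
            });
        });
        return micros / 1000;
    }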
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.653-0400 m31100| 2015-07-09T14:17:21.652-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_475
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.660-0400 m31100| 2015-07-09T14:17:21.660-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465840_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.667-0400 m31100| 2015-07-09T14:17:21.665-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_470
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.667-0400 m31100| 2015-07-09T14:17:21.665-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_470
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.668-0400 m31100| 2015-07-09T14:17:21.668-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_470
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.698-0400 m31100| 2015-07-09T14:17:21.698-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465840_154 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.699-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.699-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.699-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.700-0400 m31100| values...., out: "tmp.mrs.coll73_1436465840_154", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 2, W: 1 }, timeAcquiringMicros: { r: 16173, w: 25385, W: 25777 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 13, w: 35, R: 12, W: 9 }, timeAcquiringMicros: { r: 103489, w: 272275, R: 53132, W: 87536 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 813ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.707-0400 m31200| 2015-07-09T14:17:21.706-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.715-0400 m31200| 2015-07-09T14:17:21.715-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.716-0400 m31200| 2015-07-09T14:17:21.715-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.717-0400 m31200| 2015-07-09T14:17:21.716-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_297
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.717-0400 m31200| 2015-07-09T14:17:21.716-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465841_155 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.718-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.718-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.718-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.719-0400 m31200| values...., out: "tmp.mrs.coll73_1436465841_155", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, W: 1 }, timeAcquiringMicros: { r: 1186, W: 4135 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 4, w: 11, R: 10, W: 4 }, timeAcquiringMicros: { r: 3237, w: 88848, R: 57821, W: 17173 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 404ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.746-0400 m31100| 2015-07-09T14:17:21.746-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465840_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.751-0400 m31200| 2015-07-09T14:17:21.751-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.754-0400 m31200| 2015-07-09T14:17:21.754-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465840_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.755-0400 m31102| 2015-07-09T14:17:21.755-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465840_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.756-0400 m31101| 2015-07-09T14:17:21.755-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465840_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.764-0400 m31200| 2015-07-09T14:17:21.764-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_298
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.766-0400 m31200| 2015-07-09T14:17:21.765-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_298
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.778-0400 m31100| 2015-07-09T14:17:21.778-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_476
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.790-0400 m31200| 2015-07-09T14:17:21.790-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_298
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.791-0400 m31200| 2015-07-09T14:17:21.791-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.810-0400 m31200| 2015-07-09T14:17:21.810-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465841_138 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.811-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.811-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.811-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.813-0400 m31200| values...., out: "tmp.mrs.coll73_1436465841_138", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1 }, timeAcquiringMicros: { r: 1863, w: 15154 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 12, R: 10, W: 9 }, timeAcquiringMicros: { w: 107134, R: 27016, W: 91404 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 455ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.824-0400 m31202| 2015-07-09T14:17:21.823-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465840_154
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.824-0400 m31201| 2015-07-09T14:17:21.823-0400 I COMMAND [repl writer worker 1] CMD: drop db73.tmp.mrs.coll73_1436465840_154
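The shardedFirstPass: true field marks these commands as the shard-local phase of a cluster-wide mapReduce: mongos fans the job out to each shard primary (m31100 and m31200 here), every shard writes its partial result to a db73.tmp.mrs.coll73_<timestamp>_<n> collection, and the partials are then merged into the final output before the temporaries are dropped. A minimal sketch of the client-side call that drives one such cycle, assuming the mapper/reducer sketched earlier -- the workload's actual out: mode is not visible in this excerpt:

    // Hypothetical driver call against mongos; the out mode is an assumption.
    var res = db.getSiblingDB("db73").coll73.mapReduce(
        mapper, reducer, {out: {replace: "coll73_out"}});
    printjson(res);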
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.883-0400 m31100| 2015-07-09T14:17:21.882-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465841_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.888-0400 m31100| 2015-07-09T14:17:21.888-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_472
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.889-0400 m31100| 2015-07-09T14:17:21.888-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_472
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.906-0400 m31100| 2015-07-09T14:17:21.905-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_472
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.919-0400 m31200| 2015-07-09T14:17:21.919-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.926-0400 m31100| 2015-07-09T14:17:21.926-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465841_137 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.926-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.926-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.927-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.927-0400 m31100| values...., out: "tmp.mrs.coll73_1436465841_137", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 14892, w: 5583, W: 2587 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 4, w: 28, R: 13, W: 9 }, timeAcquiringMicros: { r: 11689, w: 238908, R: 120865, W: 39283 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 651ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.927-0400 m31200| 2015-07-09T14:17:21.926-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.927-0400 m31200| 2015-07-09T14:17:21.926-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.935-0400 m31200| 2015-07-09T14:17:21.935-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_299
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.939-0400 m31200| 2015-07-09T14:17:21.938-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465841_139 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.939-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.939-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.939-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.940-0400 m31200| values...., out: "tmp.mrs.coll73_1436465841_139", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2 }, timeAcquiringMicros: { r: 15434 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 6, R: 11, W: 7 }, timeAcquiringMicros: { w: 37281, R: 28180, W: 12342 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 297ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.970-0400 m31100| 2015-07-09T14:17:21.970-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465841_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.972-0400 m31100| 2015-07-09T14:17:21.972-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.975-0400 m31200| 2015-07-09T14:17:21.975-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.980-0400 m31100| 2015-07-09T14:17:21.980-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_473
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.980-0400 m31100| 2015-07-09T14:17:21.980-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_473
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.981-0400 m31200| 2015-07-09T14:17:21.981-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465841_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.982-0400 m31200| 2015-07-09T14:17:21.982-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.982-0400 m31200| 2015-07-09T14:17:21.982-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.987-0400 m31100| 2015-07-09T14:17:21.987-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_473
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:21.987-0400 m31100| 2015-07-09T14:17:21.987-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.001-0400 m31100| 2015-07-09T14:17:22.000-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_474
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.001-0400 m31100| 2015-07-09T14:17:22.000-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_474
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.001-0400 m31102| 2015-07-09T14:17:22.001-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465841_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.001-0400 m31200| 2015-07-09T14:17:22.001-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_300
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.002-0400 m31100| 2015-07-09T14:17:22.001-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_474
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.002-0400 m31101| 2015-07-09T14:17:22.001-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465841_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.003-0400 m31200| 2015-07-09T14:17:22.002-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.004-0400 m31200| 2015-07-09T14:17:22.003-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465841_156 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.004-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.004-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.004-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.004-0400 m31200| values...., out: "tmp.mrs.coll73_1436465841_156", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 2642 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 1, w: 1, R: 8, W: 4 }, timeAcquiringMicros: { r: 6402, w: 63, R: 19576, W: 21950 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 233ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.007-0400 m31100| 2015-07-09T14:17:22.007-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465841_138 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.007-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.007-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.007-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.008-0400 m31100| values...., out: "tmp.mrs.coll73_1436465841_138", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 1, W: 1 }, timeAcquiringMicros: { r: 28657, w: 160, W: 4879 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 6, w: 31, R: 11, W: 9 }, timeAcquiringMicros: { r: 5074, w: 213262, R: 49798, W: 67687 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 651ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.011-0400 m31100| 2015-07-09T14:17:22.010-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.014-0400 m31100| 2015-07-09T14:17:22.013-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465841_155 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.015-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.015-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.015-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.016-0400 m31100| values...., out: "tmp.mrs.coll73_1436465841_155", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 4, W: 1 }, timeAcquiringMicros: { r: 11569, w: 32295, W: 16016 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 8, w: 30, R: 12, W: 9 }, timeAcquiringMicros: { r: 33126, w: 218749, R: 60311, W: 66545 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 700ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.021-0400 m31100| 2015-07-09T14:17:22.021-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_475
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.022-0400 m31100| 2015-07-09T14:17:22.021-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_475
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.024-0400 m31100| 2015-07-09T14:17:22.023-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_475
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.038-0400 m31100| 2015-07-09T14:17:22.037-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_477
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.047-0400 m31100| 2015-07-09T14:17:22.046-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465841_139 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.047-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.047-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.047-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.049-0400 m31100| values...., out: "tmp.mrs.coll73_1436465841_139", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 5, w: 4 }, timeAcquiringMicros: { r: 48106, w: 19312 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 3, w: 11, R: 10, W: 6 }, timeAcquiringMicros: { r: 1917, w: 68496, R: 28414, W: 9591 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 405ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.052-0400 m31201| 2015-07-09T14:17:22.052-0400 I COMMAND [repl writer worker 3] CMD: drop db73.tmp.mrs.coll73_1436465841_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.055-0400 m31100| 2015-07-09T14:17:22.055-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.064-0400 m31200| 2015-07-09T14:17:22.064-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.066-0400 m31202| 2015-07-09T14:17:22.066-0400 I COMMAND [repl writer worker 9] CMD: drop db73.tmp.mrs.coll73_1436465841_137
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.067-0400 m31101| 2015-07-09T14:17:22.067-0400 I COMMAND [repl writer worker 1] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.073-0400 m31200| 2015-07-09T14:17:22.072-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.075-0400 m31201| 2015-07-09T14:17:22.074-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.084-0400 m31100| 2015-07-09T14:17:22.084-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.090-0400 m31100| 2015-07-09T14:17:22.090-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_478
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.097-0400 m31202| 2015-07-09T14:17:22.096-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.103-0400 m31200| 2015-07-09T14:17:22.103-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.108-0400 m31101| 2015-07-09T14:17:22.107-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.114-0400 m31102| 2015-07-09T14:17:22.114-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465841_138
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.115-0400 m31201| 2015-07-09T14:17:22.114-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.115-0400 m31202| 2015-07-09T14:17:22.115-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.118-0400 m31200| 2015-07-09T14:17:22.117-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.122-0400 m31100| 2015-07-09T14:17:22.122-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.129-0400 m31200| 2015-07-09T14:17:22.128-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.142-0400 m31102| 2015-07-09T14:17:22.141-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465841_155
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.145-0400 m31101| 2015-07-09T14:17:22.145-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.153-0400 m31100| 2015-07-09T14:17:22.153-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_479
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.154-0400 m31102| 2015-07-09T14:17:22.154-0400 I COMMAND [repl writer worker 0] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.154-0400 m31200| 2015-07-09T14:17:22.154-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.158-0400 m31201| 2015-07-09T14:17:22.158-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.160-0400 m31202| 2015-07-09T14:17:22.159-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465841_139
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.166-0400 m31100| 2015-07-09T14:17:22.166-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_480
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.369-0400 m31100| 2015-07-09T14:17:22.369-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.375-0400 m31100| 2015-07-09T14:17:22.374-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_476
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.378-0400 m31100| 2015-07-09T14:17:22.377-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_476
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.410-0400 m31100| 2015-07-09T14:17:22.410-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_476
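Each first pass does its work in a numbered db73.tmp.mr.coll73_<n> scratch collection and publishes per-shard output to db73.tmp.mrs.coll73_<timestamp>_<n>; both are dropped once the final result is assembled. The drops replicate like any other write, which is why every drop on the primaries m31100/m31200 is echoed by "repl writer worker" lines on the secondaries m31101/m31102 and m31201/m31202. If a run dies mid-cycle, leftovers are easy to spot by name -- a hypothetical check, matching the patterns seen in this log:

    // List any orphaned mapReduce temp collections in db73.
    db.getSiblingDB("db73").getCollectionNames().filter(function(name) {
        return /^tmp\.mrs?\./.test(name);
    });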
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.413-0400 m31100| 2015-07-09T14:17:22.413-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465841_156 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.413-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.413-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.413-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.414-0400 m31100| values...., out: "tmp.mrs.coll73_1436465841_156", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 1, W: 1 }, timeAcquiringMicros: { r: 21519, w: 5370, W: 8630 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 11, w: 20, R: 11, W: 9 }, timeAcquiringMicros: { r: 96389, w: 227353, R: 41257, W: 55603 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 643ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.425-0400 m31100| 2015-07-09T14:17:22.425-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.443-0400 m31100| 2015-07-09T14:17:22.443-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_477
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.444-0400 m31100| 2015-07-09T14:17:22.444-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_477
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.450-0400 m31100| 2015-07-09T14:17:22.449-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.454-0400 m31100| 2015-07-09T14:17:22.453-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_477
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.467-0400 m31200| 2015-07-09T14:17:22.467-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.470-0400 m31102| 2015-07-09T14:17:22.470-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.471-0400 m31101| 2015-07-09T14:17:22.471-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.475-0400 m31202| 2015-07-09T14:17:22.474-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.477-0400 m31201| 2015-07-09T14:17:22.477-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465841_156
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.483-0400 m31100| 2015-07-09T14:17:22.482-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465842_140 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.484-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.484-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.484-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.484-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_140", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 13816 } }, Database: { acquireCount: { r: 27, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 2, w: 8, R: 12, W: 8 }, timeAcquiringMicros: { r: 1376, w: 97206, R: 92824, W: 41507 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 481ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.484-0400 m31200| 2015-07-09T14:17:22.484-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.486-0400 m31100| 2015-07-09T14:17:22.486-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_481
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.509-0400 m31200| 2015-07-09T14:17:22.508-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.519-0400 m31200| 2015-07-09T14:17:22.519-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.520-0400 m31200| 2015-07-09T14:17:22.520-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.559-0400 m31200| 2015-07-09T14:17:22.558-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_301
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.580-0400 m31200| 2015-07-09T14:17:22.579-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465842_140 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.580-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.580-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.580-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.581-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_140", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { W: 1 }, timeAcquiringMicros: { W: 27106 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 1, w: 21, R: 11, W: 5 }, timeAcquiringMicros: { r: 498, w: 137388, R: 82715, W: 58141 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 578ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.590-0400 m31200| 2015-07-09T14:17:22.589-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.602-0400 m31200| 2015-07-09T14:17:22.602-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.603-0400 m31200| 2015-07-09T14:17:22.602-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.618-0400 m31200| 2015-07-09T14:17:22.617-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_303
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.629-0400 m31100| 2015-07-09T14:17:22.628-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.633-0400 m31200| 2015-07-09T14:17:22.633-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465842_157 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.633-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.634-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.634-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.634-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_157", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 4, w: 3, W: 1 }, timeAcquiringMicros: { r: 35130, w: 422, W: 1625 } }, Database: { acquireCount: { r: 26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 5, w: 15, R: 13, W: 8 }, timeAcquiringMicros: { r: 10424, w: 100925, R: 58943, W: 61736 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 518ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.636-0400 m31200| 2015-07-09T14:17:22.635-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.636-0400 m31100| 2015-07-09T14:17:22.635-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.637-0400 m31100| 2015-07-09T14:17:22.636-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_479
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.637-0400 m31100| 2015-07-09T14:17:22.637-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_479
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.637-0400 m31200| 2015-07-09T14:17:22.637-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.647-0400 m31100| 2015-07-09T14:17:22.647-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_479
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.649-0400 m31200| 2015-07-09T14:17:22.649-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.650-0400 m31200| 2015-07-09T14:17:22.649-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.654-0400 m31102| 2015-07-09T14:17:22.654-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.661-0400 m31100| 2015-07-09T14:17:22.661-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465842_157 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.662-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.662-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.662-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.662-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_157", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, W: 1 }, timeAcquiringMicros: { r: 25576, W: 7188 } }, Database: { acquireCount: { r: 27, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 2, w: 22, R: 10, W: 9 }, timeAcquiringMicros: { r: 11114, w: 92334, R: 42033, W: 94247 } }, Collection: { acquireCount: { r: 27, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 546ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.671-0400 m31100| 2015-07-09T14:17:22.663-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_482
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.671-0400 m31200| 2015-07-09T14:17:22.664-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_302
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.672-0400 m31101| 2015-07-09T14:17:22.664-0400 I COMMAND [repl writer worker 14] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.672-0400 m31200| 2015-07-09T14:17:22.665-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.672-0400 m31201| 2015-07-09T14:17:22.672-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.673-0400 m31202| 2015-07-09T14:17:22.672-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465842_140
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.678-0400 m31200| 2015-07-09T14:17:22.677-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465842_141 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.678-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.678-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.678-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.679-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_141", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:4 reslen:213 locks:{ Global: { acquireCount: { r: 161, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 17270, w: 11118, W: 564 } }, Database: { acquireCount: { r: 26, w: 66, R: 16, W: 11 }, acquireWaitCount: { r: 10, w: 18, R: 15, W: 6 }, timeAcquiringMicros: { r: 57261, w: 98668, R: 91268, W: 44153 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 610ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.688-0400 m31100| 2015-07-09T14:17:22.687-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.705-0400 m31100| 2015-07-09T14:17:22.704-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.705-0400 m31200| 2015-07-09T14:17:22.704-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.708-0400 m31100| 2015-07-09T14:17:22.708-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_478
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.709-0400 m31100| 2015-07-09T14:17:22.708-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_478
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.709-0400 m31100| 2015-07-09T14:17:22.708-0400 I COMMAND [conn45] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.714-0400 m31202| 2015-07-09T14:17:22.714-0400 I COMMAND [repl writer worker 7] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.714-0400 m31201| 2015-07-09T14:17:22.714-0400 I COMMAND [repl writer worker 3] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.717-0400 m31100| 2015-07-09T14:17:22.715-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_480
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.717-0400 m31100| 2015-07-09T14:17:22.716-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_480
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.725-0400 m31102| 2015-07-09T14:17:22.724-0400 I COMMAND [repl writer worker 9] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.725-0400 m31101| 2015-07-09T14:17:22.724-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465842_157
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.731-0400 m31100| 2015-07-09T14:17:22.730-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_480
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.732-0400 m31100| 2015-07-09T14:17:22.731-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_483
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.735-0400 m31100| 2015-07-09T14:17:22.735-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_478
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.742-0400 m31200| 2015-07-09T14:17:22.742-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.747-0400 m31100| 2015-07-09T14:17:22.747-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465842_142 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.747-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.748-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.748-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.749-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_142", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, W: 1 }, timeAcquiringMicros: { r: 37614, W: 11569 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 6, w: 20, R: 10, W: 9 }, timeAcquiringMicros: { r: 33386, w: 170366, R: 34545, W: 81292 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 602ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.750-0400 m31100| 2015-07-09T14:17:22.749-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465842_141 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.750-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.750-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.750-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.751-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_141", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 28111, w: 22798, W: 7469 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 32, R: 12, W: 9 }, timeAcquiringMicros: { r: 62564, w: 147060, R: 92917, W: 69650 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 681ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.773-0400 m31100| 2015-07-09T14:17:22.773-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.781-0400 m31200| 2015-07-09T14:17:22.781-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.784-0400 m31102| 2015-07-09T14:17:22.783-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.786-0400 m31101| 2015-07-09T14:17:22.785-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.795-0400 m31201| 2015-07-09T14:17:22.793-0400 I COMMAND [repl writer worker 14] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.795-0400 m31202| 2015-07-09T14:17:22.794-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465842_141
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.797-0400 m31100| 2015-07-09T14:17:22.797-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_484
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.801-0400 m31200| 2015-07-09T14:17:22.801-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.822-0400 m31200| 2015-07-09T14:17:22.821-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.830-0400 m31200| 2015-07-09T14:17:22.829-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.831-0400 m31200| 2015-07-09T14:17:22.830-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.858-0400 m31200| 2015-07-09T14:17:22.856-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_304
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.879-0400 m31200| 2015-07-09T14:17:22.878-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465842_142 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.880-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.880-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.881-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.881-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_142", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 7, W: 1 }, timeAcquiringMicros: { r: 30070, w: 35167, W: 20820 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 15, w: 28, R: 12, W: 9 }, timeAcquiringMicros: { r: 87542, w: 192474, R: 37062, W: 99964 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 734ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.955-0400 m31100| 2015-07-09T14:17:22.955-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.964-0400 m31200| 2015-07-09T14:17:22.964-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.966-0400 m31102| 2015-07-09T14:17:22.966-0400 I COMMAND [repl writer worker 3] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.967-0400 m31101| 2015-07-09T14:17:22.966-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.979-0400 m31201| 2015-07-09T14:17:22.978-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.979-0400 m31100| 2015-07-09T14:17:22.979-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_485
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.979-0400 m31202| 2015-07-09T14:17:22.979-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465842_142
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:22.985-0400 m31200| 2015-07-09T14:17:22.985-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_309
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.106-0400 m31200| 2015-07-09T14:17:23.106-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465842_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.111-0400 m31200| 2015-07-09T14:17:23.110-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.111-0400 m31200| 2015-07-09T14:17:23.111-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.121-0400 m31100| 2015-07-09T14:17:23.121-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465842_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.122-0400 m31200| 2015-07-09T14:17:23.122-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_305
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.134-0400 m31100| 2015-07-09T14:17:23.133-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_481
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.134-0400 m31100| 2015-07-09T14:17:23.133-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_481
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.137-0400 m31100| 2015-07-09T14:17:23.135-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_481
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.138-0400 m31200| 2015-07-09T14:17:23.138-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465842_158 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.138-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.139-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.139-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.140-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_158", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 7, W: 1 }, timeAcquiringMicros: { r: 24664, w: 21783, W: 21147 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 32, R: 12, W: 8 }, timeAcquiringMicros: { r: 19208, w: 292196, R: 53115, W: 22314 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 665ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.140-0400 m31200| 2015-07-09T14:17:23.138-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465842_143
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.143-0400 m31200| 2015-07-09T14:17:23.143-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.144-0400 m31200| 2015-07-09T14:17:23.144-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.145-0400 m31200| 2015-07-09T14:17:23.145-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_306
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.150-0400 m31200| 2015-07-09T14:17:23.148-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465842_143 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.150-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.151-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.151-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.152-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_143", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 25632, w: 9445, W: 15811 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 24, R: 12, W: 9 }, timeAcquiringMicros: { r: 12081, w: 134956, R: 55515, W: 27675 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 491ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.154-0400 m31200| 2015-07-09T14:17:23.152-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465842_144
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.158-0400 m31100| 2015-07-09T14:17:23.157-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465842_158 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.159-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.159-0400 m31100| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.159-0400 m31100|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.160-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_158", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 8321, w: 7583, W: 374 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 6, w: 35, R: 11, W: 7 }, timeAcquiringMicros: { r: 51431, w: 344204, R: 54476, W: 29336 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 684ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.165-0400 m31200| 2015-07-09T14:17:23.164-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.165-0400 m31200| 2015-07-09T14:17:23.165-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.185-0400 m31200| 2015-07-09T14:17:23.185-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_308
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.187-0400 m31100| 2015-07-09T14:17:23.187-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465842_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.188-0400 m31200| 2015-07-09T14:17:23.187-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465842_144 command: mapReduce { mapreduce: "coll73", map: function mapper() {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.188-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) {
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.188-0400 m31200| var res = {};
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.188-0400 m31200|
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.188-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_144", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 5, W: 1 }, timeAcquiringMicros: { r: 16689, w: 22581, W: 268 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 5, w: 17, R: 12, W: 7 }, timeAcquiringMicros: { r: 10610, w: 64124, R: 43098, W: 52786 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 394ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.191-0400 m31200| 2015-07-09T14:17:23.190-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_159
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.196-0400 m31200| 2015-07-09T14:17:23.195-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465842_158
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.198-0400 m31200| 2015-07-09T14:17:23.197-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.198-0400 m31200| 2015-07-09T14:17:23.197-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_307
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.198-0400 m31100| 2015-07-09T14:17:23.198-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465842_143
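Many of the first passes in this stretch run for 400-800ms. Outside a test log, the same slow-operation detail can be captured with the per-mongod profiler; a hypothetical interactive session (the threshold values are assumptions, and in a sharded cluster this must be run on each shard separately):

    var db73 = db.getSiblingDB("db73");
    db73.setProfilingLevel(1, 100);  // also log any operation slower than 100ms
    db73.system.profile.find({millis: {$gt: 400}}).sort({ts: -1}).limit(5);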
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.199-0400 m31102| 2015-07-09T14:17:23.199-0400 I COMMAND [repl writer worker 6] CMD: drop db73.tmp.mrs.coll73_1436465842_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.200-0400 m31101| 2015-07-09T14:17:23.199-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465842_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.203-0400 m31100| 2015-07-09T14:17:23.203-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_482 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.204-0400 m31200| 2015-07-09T14:17:23.203-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_307 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.205-0400 m31100| 2015-07-09T14:17:23.204-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_482 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.205-0400 m31100| 2015-07-09T14:17:23.204-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_482 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.206-0400 m31200| 2015-07-09T14:17:23.206-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465842_159 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.207-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.207-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.207-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.210-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_159", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 6, w: 3 }, timeAcquiringMicros: { r: 39760, w: 15162 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 9, w: 19, R: 13, W: 9 }, timeAcquiringMicros: { r: 36091, w: 58785, R: 61593, W: 40065 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 491ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.210-0400 m31200| 2015-07-09T14:17:23.206-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_310 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.211-0400 m31100| 2015-07-09T14:17:23.207-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_486 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.211-0400 m31100| 2015-07-09T14:17:23.208-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465842_143 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.212-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.212-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.212-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.213-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_143", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:3 reslen:213 locks:{ Global: { acquireCount: { r: 157, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 12387, w: 11415, W: 2186 } }, Database: { acquireCount: { r: 
26, w: 66, R: 14, W: 11 }, acquireWaitCount: { r: 5, w: 27, R: 14, W: 5 }, timeAcquiringMicros: { r: 15210, w: 198527, R: 50403, W: 14563 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 550ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.235-0400 m31100| 2015-07-09T14:17:23.235-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.239-0400 m31200| 2015-07-09T14:17:23.239-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465842_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.241-0400 m31101| 2015-07-09T14:17:23.241-0400 I COMMAND [repl writer worker 5] CMD: drop db73.tmp.mrs.coll73_1436465842_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.242-0400 m31102| 2015-07-09T14:17:23.241-0400 I COMMAND [repl writer worker 10] CMD: drop db73.tmp.mrs.coll73_1436465842_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.255-0400 m31201| 2015-07-09T14:17:23.255-0400 I COMMAND [repl writer worker 0] CMD: drop db73.tmp.mrs.coll73_1436465842_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.258-0400 m31202| 2015-07-09T14:17:23.258-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465842_158 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.270-0400 m31100| 2015-07-09T14:17:23.268-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_487 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.270-0400 m31200| 2015-07-09T14:17:23.270-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_311 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.276-0400 m31100| 2015-07-09T14:17:23.275-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465842_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.282-0400 m31201| 2015-07-09T14:17:23.282-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465842_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.286-0400 m31202| 2015-07-09T14:17:23.286-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465842_143 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.288-0400 m31100| 2015-07-09T14:17:23.287-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_483 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.288-0400 m31100| 2015-07-09T14:17:23.287-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_483 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.289-0400 m31100| 2015-07-09T14:17:23.289-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_483 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.301-0400 m31100| 2015-07-09T14:17:23.300-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465842_159 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.301-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.301-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.301-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.302-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_159", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 
2, w: 2, W: 1 }, timeAcquiringMicros: { r: 8822, w: 13189, W: 7949 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 9, w: 24, R: 12, W: 7 }, timeAcquiringMicros: { r: 44809, w: 157288, R: 60959, W: 39930 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 586ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.334-0400 m31100| 2015-07-09T14:17:23.334-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465842_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.347-0400 m31100| 2015-07-09T14:17:23.347-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465842_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.358-0400 m31100| 2015-07-09T14:17:23.358-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_484 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.359-0400 m31100| 2015-07-09T14:17:23.358-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_484 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.362-0400 m31200| 2015-07-09T14:17:23.361-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465842_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.373-0400 m31202| 2015-07-09T14:17:23.371-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465842_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.374-0400 m31200| 2015-07-09T14:17:23.374-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_312 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.377-0400 m31100| 2015-07-09T14:17:23.377-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_488 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.381-0400 m31201| 2015-07-09T14:17:23.379-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465842_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.388-0400 m31100| 2015-07-09T14:17:23.388-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_484 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.392-0400 m31102| 2015-07-09T14:17:23.392-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465842_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.396-0400 m31101| 2015-07-09T14:17:23.396-0400 I COMMAND [repl writer worker 3] CMD: drop db73.tmp.mrs.coll73_1436465842_159 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.404-0400 m31100| 2015-07-09T14:17:23.403-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465842_144 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.404-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.404-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.404-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.405-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_144", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 3, W: 1 }, timeAcquiringMicros: { r: 8914, w: 33653, W: 21404 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 10, w: 22, R: 13, W: 9 }, timeAcquiringMicros: { r: 67634, w: 102990, R: 48460, W: 80358 } }, Collection: 
{ acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 610ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.434-0400 m31100| 2015-07-09T14:17:23.433-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.448-0400 m31200| 2015-07-09T14:17:23.447-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465842_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.450-0400 m31101| 2015-07-09T14:17:23.450-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465842_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.454-0400 m31102| 2015-07-09T14:17:23.454-0400 I COMMAND [repl writer worker 3] CMD: drop db73.tmp.mrs.coll73_1436465842_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.465-0400 m31202| 2015-07-09T14:17:23.465-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465842_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.467-0400 m31200| 2015-07-09T14:17:23.466-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_313 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.473-0400 m31201| 2015-07-09T14:17:23.472-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465842_144 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.476-0400 m31100| 2015-07-09T14:17:23.476-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_489 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.589-0400 m31200| 2015-07-09T14:17:23.589-0400 I COMMAND [conn32] CMD: drop db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.593-0400 m31200| 2015-07-09T14:17:23.593-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_309 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.594-0400 m31200| 2015-07-09T14:17:23.593-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_309 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.595-0400 m31200| 2015-07-09T14:17:23.594-0400 I COMMAND [conn32] CMD: drop db73.tmp.mr.coll73_309 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.602-0400 m31200| 2015-07-09T14:17:23.602-0400 I COMMAND [conn32] command db73.tmp.mrs.coll73_1436465842_145 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.602-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.602-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.602-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.603-0400 m31200| values...., out: "tmp.mrs.coll73_1436465842_145", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 5, W: 1 }, timeAcquiringMicros: { r: 13922, W: 26948 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 10, w: 37, R: 12, W: 7 }, timeAcquiringMicros: { r: 51972, w: 276913, R: 50933, W: 31248 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 625ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.611-0400 m31100| 2015-07-09T14:17:23.610-0400 I COMMAND [conn45] CMD: drop 
db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.621-0400 m31100| 2015-07-09T14:17:23.621-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_485 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.625-0400 m31100| 2015-07-09T14:17:23.624-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_485 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.633-0400 m31100| 2015-07-09T14:17:23.633-0400 I COMMAND [conn45] CMD: drop db73.tmp.mr.coll73_485 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.636-0400 m31200| 2015-07-09T14:17:23.636-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.642-0400 m31200| 2015-07-09T14:17:23.642-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_310 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.643-0400 m31200| 2015-07-09T14:17:23.642-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_310 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.643-0400 m31200| 2015-07-09T14:17:23.643-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_310 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.646-0400 m31100| 2015-07-09T14:17:23.646-0400 I COMMAND [conn45] command db73.tmp.mrs.coll73_1436465842_145 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.647-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.647-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.648-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.648-0400 m31100| values...., out: "tmp.mrs.coll73_1436465842_145", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3, W: 1 }, timeAcquiringMicros: { r: 5914, w: 56019, W: 3401 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 9, w: 36, R: 11, W: 9 }, timeAcquiringMicros: { r: 68517, w: 274583, R: 24255, W: 57947 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 669ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.650-0400 m31200| 2015-07-09T14:17:23.650-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465843_160 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.651-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.651-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.651-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.652-0400 m31200| values...., out: "tmp.mrs.coll73_1436465843_160", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { w: 6, W: 1 }, timeAcquiringMicros: { w: 22487, W: 6024 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 4, w: 21, R: 12, W: 7 }, timeAcquiringMicros: { r: 6841, w: 172368, R: 39086, W: 13502 } }, Collection: { 
acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 446ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.668-0400 m31100| 2015-07-09T14:17:23.667-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.670-0400 m31200| 2015-07-09T14:17:23.670-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.678-0400 m31102| 2015-07-09T14:17:23.678-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.682-0400 m31101| 2015-07-09T14:17:23.682-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.687-0400 m31202| 2015-07-09T14:17:23.686-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.689-0400 m31201| 2015-07-09T14:17:23.689-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465842_145 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.690-0400 m30999| 2015-07-09T14:17:23.689-0400 I NETWORK [conn474] end connection 127.0.0.1:64080 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.690-0400 m31200| 2015-07-09T14:17:23.689-0400 I COMMAND [conn137] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.697-0400 m31200| 2015-07-09T14:17:23.697-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_311 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.697-0400 m31200| 2015-07-09T14:17:23.697-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_311 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.701-0400 m31200| 2015-07-09T14:17:23.700-0400 I COMMAND [conn137] CMD: drop db73.tmp.mr.coll73_311 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.710-0400 m31100| 2015-07-09T14:17:23.710-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.718-0400 m31100| 2015-07-09T14:17:23.718-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_486 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.718-0400 m31100| 2015-07-09T14:17:23.718-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_486 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.723-0400 m31200| 2015-07-09T14:17:23.722-0400 I COMMAND [conn137] command db73.tmp.mrs.coll73_1436465843_146 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.723-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.723-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.723-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.723-0400 m31200| values...., out: "tmp.mrs.coll73_1436465843_146", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 7, W: 1 }, timeAcquiringMicros: { r: 1080, w: 25690, W: 259 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 2, w: 16, R: 
13, W: 9 }, timeAcquiringMicros: { r: 13613, w: 74927, R: 63599, W: 21774 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 463ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.724-0400 m31100| 2015-07-09T14:17:23.724-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_486 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.725-0400 m31100| 2015-07-09T14:17:23.725-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465843_160 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.725-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.726-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.726-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.728-0400 m31100| values...., out: "tmp.mrs.coll73_1436465843_160", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:2 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 }, acquireWaitCount: { r: 2, w: 1, W: 1 }, timeAcquiringMicros: { r: 39538, w: 16290, W: 5414 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { r: 3, w: 18, R: 13, W: 9 }, timeAcquiringMicros: { r: 2230, w: 131563, R: 55940, W: 22038 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 520ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.731-0400 m31200| 2015-07-09T14:17:23.727-0400 I COMMAND [conn35] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.732-0400 m31100| 2015-07-09T14:17:23.732-0400 I COMMAND [conn49] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.736-0400 m31200| 2015-07-09T14:17:23.735-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_313 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.736-0400 m31200| 2015-07-09T14:17:23.736-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_313 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.738-0400 m31200| 2015-07-09T14:17:23.738-0400 I COMMAND [conn35] CMD: drop db73.tmp.mr.coll73_313 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.739-0400 m31100| 2015-07-09T14:17:23.738-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_488 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.739-0400 m31100| 2015-07-09T14:17:23.738-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_488 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.740-0400 m31200| 2015-07-09T14:17:23.739-0400 I COMMAND [conn35] command db73.tmp.mrs.coll73_1436465843_147 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.740-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.740-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.740-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.742-0400 m31200| values...., out: "tmp.mrs.coll73_1436465843_147", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 
writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 1, W: 1 }, timeAcquiringMicros: { r: 9429, w: 7764, W: 59 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { w: 8, R: 12, W: 7 }, timeAcquiringMicros: { w: 18933, R: 19920, W: 47561 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 281ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.743-0400 m31100| 2015-07-09T14:17:23.743-0400 I COMMAND [conn49] CMD: drop db73.tmp.mr.coll73_488 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.745-0400 m31100| 2015-07-09T14:17:23.745-0400 I COMMAND [conn49] command db73.tmp.mrs.coll73_1436465843_161 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.746-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.746-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.746-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.746-0400 m31100| values...., out: "tmp.mrs.coll73_1436465843_161", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 2, W: 1 }, timeAcquiringMicros: { r: 3323, w: 23601, W: 753 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 14, R: 12, W: 9 }, timeAcquiringMicros: { r: 7004, w: 63552, R: 15309, W: 36950 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 373ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.747-0400 m31100| 2015-07-09T14:17:23.746-0400 I COMMAND [conn175] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.749-0400 m31200| 2015-07-09T14:17:23.747-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.752-0400 m31100| 2015-07-09T14:17:23.752-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.765-0400 m31200| 2015-07-09T14:17:23.764-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_312 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.766-0400 m31200| 2015-07-09T14:17:23.764-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_312 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.766-0400 m31100| 2015-07-09T14:17:23.765-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_487 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.766-0400 m31100| 2015-07-09T14:17:23.765-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_487 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.767-0400 m31200| 2015-07-09T14:17:23.766-0400 I COMMAND [conn38] CMD: drop db73.tmp.mr.coll73_312 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.774-0400 m31200| 2015-07-09T14:17:23.773-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.777-0400 m31101| 2015-07-09T14:17:23.776-0400 I COMMAND [repl writer worker 4] CMD: drop 
db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.778-0400 m31100| 2015-07-09T14:17:23.777-0400 I COMMAND [conn175] CMD: drop db73.tmp.mr.coll73_487 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.791-0400 m31100| 2015-07-09T14:17:23.791-0400 I COMMAND [conn175] command db73.tmp.mrs.coll73_1436465843_146 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.792-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.793-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.793-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.794-0400 m31100| values...., out: "tmp.mrs.coll73_1436465843_146", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2, W: 1 }, timeAcquiringMicros: { r: 28860, w: 34797, W: 334 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 7, w: 21, R: 12, W: 9 }, timeAcquiringMicros: { r: 10385, w: 111040, R: 69092, W: 30463 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 533ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.794-0400 m31100| 2015-07-09T14:17:23.793-0400 I COMMAND [conn182] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.795-0400 m31201| 2015-07-09T14:17:23.794-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.797-0400 m31202| 2015-07-09T14:17:23.796-0400 I COMMAND [repl writer worker 8] CMD: drop db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.797-0400 m31200| 2015-07-09T14:17:23.797-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_314 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.798-0400 m31200| 2015-07-09T14:17:23.797-0400 I COMMAND [conn38] command db73.tmp.mrs.coll73_1436465843_161 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.798-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.798-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.798-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.799-0400 m31200| values...., out: "tmp.mrs.coll73_1436465843_161", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 153, w: 74, W: 3 }, acquireWaitCount: { r: 3, w: 2 }, timeAcquiringMicros: { r: 18373, w: 21184 } }, Database: { acquireCount: { r: 26, w: 66, R: 12, W: 11 }, acquireWaitCount: { r: 3, w: 13, R: 12, W: 5 }, timeAcquiringMicros: { r: 6508, w: 51806, R: 36133, W: 28634 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 428ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.804-0400 m31102| 2015-07-09T14:17:23.802-0400 I COMMAND [repl writer worker 
6] CMD: drop db73.tmp.mrs.coll73_1436465843_160 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.806-0400 m31100| 2015-07-09T14:17:23.804-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_489 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.807-0400 m31100| 2015-07-09T14:17:23.806-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_489 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.808-0400 m31100| 2015-07-09T14:17:23.808-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.808-0400 m31100| 2015-07-09T14:17:23.808-0400 I COMMAND [conn182] CMD: drop db73.tmp.mr.coll73_489 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.809-0400 m31100| 2015-07-09T14:17:23.809-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_490 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.812-0400 m31200| 2015-07-09T14:17:23.811-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.812-0400 m31100| 2015-07-09T14:17:23.811-0400 I COMMAND [conn182] command db73.tmp.mrs.coll73_1436465843_147 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.813-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.813-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.813-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.814-0400 m31100| values...., out: "tmp.mrs.coll73_1436465843_147", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1, w: 3 }, timeAcquiringMicros: { r: 14202, w: 29047 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, w: 14, R: 10, W: 6 }, timeAcquiringMicros: { r: 15570, w: 40553, R: 28686, W: 43956 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 353ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.822-0400 m30999| 2015-07-09T14:17:23.822-0400 I NETWORK [conn473] end connection 127.0.0.1:64078 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.828-0400 m31201| 2015-07-09T14:17:23.828-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.829-0400 m31202| 2015-07-09T14:17:23.828-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.833-0400 m31100| 2015-07-09T14:17:23.833-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.848-0400 m31100| 2015-07-09T14:17:23.848-0400 I COMMAND [conn38] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.851-0400 m31101| 2015-07-09T14:17:23.850-0400 I COMMAND [repl writer worker 2] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.854-0400 m31200| 2015-07-09T14:17:23.853-0400 I COMMAND [conn18] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:23.854-0400 m31200| 2015-07-09T14:17:23.853-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.855-0400 m31102| 2015-07-09T14:17:23.855-0400 I COMMAND [repl writer worker 3] CMD: drop db73.tmp.mrs.coll73_1436465843_146 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.859-0400 m31202| 2015-07-09T14:17:23.857-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.859-0400 m31201| 2015-07-09T14:17:23.857-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.861-0400 m30999| 2015-07-09T14:17:23.860-0400 I NETWORK [conn472] end connection 127.0.0.1:64077 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.868-0400 m31201| 2015-07-09T14:17:23.862-0400 I COMMAND [repl writer worker 4] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.868-0400 m30998| 2015-07-09T14:17:23.863-0400 I NETWORK [conn473] end connection 127.0.0.1:64079 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.870-0400 m31202| 2015-07-09T14:17:23.868-0400 I COMMAND [repl writer worker 0] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.880-0400 m31101| 2015-07-09T14:17:23.876-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.885-0400 m31101| 2015-07-09T14:17:23.884-0400 I COMMAND [repl writer worker 9] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.885-0400 m31102| 2015-07-09T14:17:23.885-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465843_161 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.887-0400 m31102| 2015-07-09T14:17:23.886-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465843_147 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.936-0400 m31100| 2015-07-09T14:17:23.936-0400 I COMMAND [conn185] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.941-0400 m31100| 2015-07-09T14:17:23.940-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_490 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.941-0400 m31100| 2015-07-09T14:17:23.940-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_490 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.944-0400 m31100| 2015-07-09T14:17:23.942-0400 I COMMAND [conn185] CMD: drop db73.tmp.mr.coll73_490 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.951-0400 m31100| 2015-07-09T14:17:23.950-0400 I COMMAND [conn185] command db73.tmp.mrs.coll73_1436465843_162 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.951-0400 m31100| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.951-0400 m31100| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.951-0400 m31100| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.951-0400 m31100| values...., out: "tmp.mrs.coll73_1436465843_162", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 
numYields:0 reslen:213 locks:{ Global: { acquireCount: { r: 151, w: 74, W: 3 }, acquireWaitCount: { r: 1 }, timeAcquiringMicros: { r: 9385 } }, Database: { acquireCount: { r: 26, w: 66, R: 11, W: 11 }, acquireWaitCount: { r: 1, R: 1, W: 3 }, timeAcquiringMicros: { r: 2594, R: 1277, W: 3499 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 155ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.959-0400 m31200| 2015-07-09T14:17:23.959-0400 I COMMAND [conn60] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.966-0400 m31200| 2015-07-09T14:17:23.965-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_314 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.966-0400 m31200| 2015-07-09T14:17:23.966-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_314 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.967-0400 m31200| 2015-07-09T14:17:23.967-0400 I COMMAND [conn60] CMD: drop db73.tmp.mr.coll73_314 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.967-0400 m31200| 2015-07-09T14:17:23.967-0400 I COMMAND [conn60] command db73.tmp.mrs.coll73_1436465843_162 command: mapReduce { mapreduce: "coll73", map: function mapper() { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.967-0400 m31200| if (this.hasOwnProperty('key') && this.has..., reduce: function reducer(key, values) { [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.967-0400 m31200| var res = {}; [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.967-0400 m31200| [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.968-0400 m31200| values...., out: "tmp.mrs.coll73_1436465843_162", shardedFirstPass: true } planSummary: COUNT ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:1 reslen:213 locks:{ Global: { acquireCount: { r: 155, w: 74, W: 3 } }, Database: { acquireCount: { r: 26, w: 66, R: 13, W: 11 }, acquireWaitCount: { w: 1, R: 1 }, timeAcquiringMicros: { w: 914, R: 4498 } }, Collection: { acquireCount: { r: 26, w: 47 } }, Metadata: { acquireCount: { w: 22 } }, oplog: { acquireCount: { w: 22 } } } protocol:op_query 170ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.981-0400 m31100| 2015-07-09T14:17:23.980-0400 I COMMAND [conn36] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.981-0400 m31200| 2015-07-09T14:17:23.981-0400 I COMMAND [conn65] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.982-0400 m31102| 2015-07-09T14:17:23.982-0400 I COMMAND [repl writer worker 13] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.982-0400 m31101| 2015-07-09T14:17:23.982-0400 I COMMAND [repl writer worker 11] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.985-0400 m31202| 2015-07-09T14:17:23.985-0400 I COMMAND [repl writer worker 12] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.986-0400 m31201| 2015-07-09T14:17:23.985-0400 I COMMAND [repl writer worker 15] CMD: drop db73.tmp.mrs.coll73_1436465843_162 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:23.987-0400 m30998| 2015-07-09T14:17:23.986-0400 I NETWORK [conn472] end connection 127.0.0.1:64076 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.002-0400 
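(The [repl writer worker N] lines from m31101/m31102 and m31201/m31202 above are the shard secondaries applying the same temp-collection drops from their primaries' oplogs. A sketch of how those entries could be inspected, assuming a direct connection to a shard member such as the node on port 31100 — not part of the test itself:)

// Drop commands replicate as op: 'c' entries in local.oplog.rs.
var oplog = db.getSiblingDB('local').getCollection('oplog.rs');
oplog.find({ op: 'c', ns: 'db73.$cmd', 'o.drop': /^tmp\.mr/ })
     .sort({ $natural: -1 })   // newest first
     .limit(5)
     .forEach(printjson);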
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.002-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.002-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.003-0400 jstests/concurrency/fsm_workloads/map_reduce_inline.js: Workload completed in 7053 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.003-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.003-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.003-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.003-0400 m30999| 2015-07-09T14:17:24.003-0400 I COMMAND  [conn1] DROP: db73.coll73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.003-0400 m30999| 2015-07-09T14:17:24.003-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:24.003-0400-559ebab4ca4787b9985d1f04", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465844003), what: "dropCollection.start", ns: "db73.coll73", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.060-0400 m30999| 2015-07-09T14:17:24.059-0400 I SHARDING [conn1] distributed lock 'db73.coll73/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab4ca4787b9985d1f05
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.061-0400 m31100| 2015-07-09T14:17:24.060-0400 I COMMAND  [conn38] CMD: drop db73.coll73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.064-0400 m31200| 2015-07-09T14:17:24.064-0400 I COMMAND  [conn18] CMD: drop db73.coll73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.065-0400 m31102| 2015-07-09T14:17:24.065-0400 I COMMAND  [repl writer worker 1] CMD: drop db73.coll73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.068-0400 m31201| 2015-07-09T14:17:24.068-0400 I COMMAND  [repl writer worker 14] CMD: drop db73.coll73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.069-0400 m31202| 2015-07-09T14:17:24.068-0400 I COMMAND  [repl writer worker 9] CMD: drop db73.coll73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.077-0400 m31101| 2015-07-09T14:17:24.076-0400 I COMMAND  [repl writer worker 12] CMD: drop db73.coll73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.120-0400 m31100| 2015-07-09T14:17:24.120-0400 I SHARDING [conn38] remotely refreshing metadata for db73.coll73 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebaabca4787b9985d1f02, current metadata version is 2|3||559ebaabca4787b9985d1f02
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.122-0400 m31100| 2015-07-09T14:17:24.121-0400 W SHARDING [conn38] no chunks found when reloading db73.coll73, previous version was 0|0||559ebaabca4787b9985d1f02, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.122-0400 m31100| 2015-07-09T14:17:24.122-0400 I SHARDING [conn38] dropping metadata for db73.coll73 at shard version 2|3||559ebaabca4787b9985d1f02, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.123-0400 m31200| 2015-07-09T14:17:24.123-0400 I SHARDING [conn18] remotely refreshing metadata for db73.coll73 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebaabca4787b9985d1f02, current metadata version is 2|5||559ebaabca4787b9985d1f02
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.125-0400 m31200| 2015-07-09T14:17:24.125-0400 W SHARDING [conn18] no chunks found when reloading db73.coll73, previous version was 0|0||559ebaabca4787b9985d1f02, this is a drop
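(The sequence above is the coordinated teardown: mongos conn1 takes the distributed lock on db73.coll73, writes a dropCollection.start event to the config change log, has each shard primary drop the collection and its sharding metadata, and the secondaries replicate the drop. Those "about to log metadata event" documents land in config.changelog and can be read back afterwards; a sketch, assuming a connection to the mongos on port 30999:)

// The logged event documents carry ns/what/time fields, so the two most
// recent drop events for this namespace can be queried directly.
var changelog = db.getSiblingDB('config').changelog;
changelog.find({ ns: 'db73.coll73', what: /dropCollection/ })
         .sort({ time: -1 })
         .limit(2)
         .forEach(printjson);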
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.125-0400 m31200| 2015-07-09T14:17:24.125-0400 I SHARDING [conn18] dropping metadata for db73.coll73 at shard version 2|5||559ebaabca4787b9985d1f02, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.126-0400 m30999| 2015-07-09T14:17:24.126-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:24.126-0400-559ebab4ca4787b9985d1f06", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465844126), what: "dropCollection", ns: "db73.coll73", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.179-0400 m30999| 2015-07-09T14:17:24.179-0400 I SHARDING [conn1] distributed lock 'db73.coll73/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.236-0400 m30999| 2015-07-09T14:17:24.235-0400 I COMMAND  [conn1] DROP DATABASE: db73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.236-0400 m30999| 2015-07-09T14:17:24.236-0400 I SHARDING [conn1] DBConfig::dropDatabase: db73
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.236-0400 m30999| 2015-07-09T14:17:24.236-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:24.236-0400-559ebab4ca4787b9985d1f07", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465844236), what: "dropDatabase.start", ns: "db73", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.341-0400 m30999| 2015-07-09T14:17:24.341-0400 I SHARDING [conn1] DBConfig::dropDatabase: db73 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.341-0400 m31100| 2015-07-09T14:17:24.341-0400 I COMMAND  [conn160] dropDatabase db73 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.342-0400 m31100| 2015-07-09T14:17:24.341-0400 I COMMAND  [conn160] dropDatabase db73 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.342-0400 m30999| 2015-07-09T14:17:24.342-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:24.342-0400-559ebab4ca4787b9985d1f08", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465844342), what: "dropDatabase", ns: "db73", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.343-0400 m31101| 2015-07-09T14:17:24.342-0400 I COMMAND  [repl writer worker 14] dropDatabase db73 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.343-0400 m31102| 2015-07-09T14:17:24.342-0400 I COMMAND  [repl writer worker 4] dropDatabase db73 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.343-0400 m31102| 2015-07-09T14:17:24.343-0400 I COMMAND  [repl writer worker 4] dropDatabase db73 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.343-0400 m31101| 2015-07-09T14:17:24.342-0400 I COMMAND  [repl writer worker 14] dropDatabase db73 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.428-0400 m31100| 2015-07-09T14:17:24.427-0400 I COMMAND  [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.432-0400 m31102| 2015-07-09T14:17:24.431-0400 I COMMAND  [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.432-0400 m31101| 2015-07-09T14:17:24.432-0400 I COMMAND  [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.462-0400 m31200| 2015-07-09T14:17:24.461-0400 I COMMAND  [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.465-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.466-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.466-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.466-0400 jstests/concurrency/fsm_workloads/touch_no_data_no_index.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.466-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.466-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.466-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.467-0400 m31201| 2015-07-09T14:17:24.465-0400 I COMMAND  [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.467-0400 m31202| 2015-07-09T14:17:24.466-0400 I COMMAND  [repl writer worker 11] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.474-0400 m30999| 2015-07-09T14:17:24.474-0400 I SHARDING [conn1] distributed lock 'db74/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab4ca4787b9985d1f09
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.478-0400 m30999| 2015-07-09T14:17:24.478-0400 I SHARDING [conn1] Placing [db74] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.478-0400 m30999| 2015-07-09T14:17:24.478-0400 I SHARDING [conn1] Enabling sharding for database [db74] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.533-0400 m30999| 2015-07-09T14:17:24.532-0400 I SHARDING [conn1] distributed lock 'db74/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.554-0400 m31100| 2015-07-09T14:17:24.553-0400 I INDEX    [conn144] build index on: db74.coll74 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db74.coll74" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.554-0400 m31100| 2015-07-09T14:17:24.553-0400 I INDEX    [conn144] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.562-0400 m31100| 2015-07-09T14:17:24.562-0400 I INDEX    [conn144] build index done.  scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.563-0400 m30999| 2015-07-09T14:17:24.563-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db74.coll74", key: { tid: 1.0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.568-0400 m30999| 2015-07-09T14:17:24.567-0400 I SHARDING [conn1] distributed lock 'db74.coll74/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab4ca4787b9985d1f0a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.570-0400 m30999| 2015-07-09T14:17:24.570-0400 I SHARDING [conn1] enable sharding on: db74.coll74 with shard key: { tid: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.571-0400 m30999| 2015-07-09T14:17:24.570-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:24.570-0400-559ebab4ca4787b9985d1f0b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465844570), what: "shardCollection.start", ns: "db74.coll74", details: { shardKey: { tid: 1.0 }, collection: "db74.coll74", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.576-0400 m31101| 2015-07-09T14:17:24.575-0400 I INDEX [repl writer worker 5] build index on: db74.coll74 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db74.coll74" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.576-0400 m31101| 2015-07-09T14:17:24.575-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.579-0400 m31102| 2015-07-09T14:17:24.579-0400 I INDEX [repl writer worker 5] build index on: db74.coll74 properties: { v: 1, key: { tid: 1.0 }, name: "tid_1", ns: "db74.coll74" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.579-0400 m31102| 2015-07-09T14:17:24.579-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.581-0400 m31101| 2015-07-09T14:17:24.580-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.585-0400 m31102| 2015-07-09T14:17:24.584-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.624-0400 m30999| 2015-07-09T14:17:24.624-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db74.coll74 using new epoch 559ebab4ca4787b9985d1f0c [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.679-0400 m30999| 2015-07-09T14:17:24.678-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db74.coll74: 0ms sequenceNumber: 320 version: 1|0||559ebab4ca4787b9985d1f0c based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.736-0400 m30999| 2015-07-09T14:17:24.735-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db74.coll74: 0ms sequenceNumber: 321 version: 1|0||559ebab4ca4787b9985d1f0c based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.738-0400 m31100| 2015-07-09T14:17:24.737-0400 I SHARDING [conn182] remotely refreshing metadata for db74.coll74 with requested shard version 1|0||559ebab4ca4787b9985d1f0c, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.739-0400 m31100| 2015-07-09T14:17:24.739-0400 I SHARDING [conn182] collection db74.coll74 was previously unsharded, new metadata loaded with shard version 1|0||559ebab4ca4787b9985d1f0c [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.739-0400 m31100| 2015-07-09T14:17:24.739-0400 I SHARDING [conn182] collection version was loaded at version 1|0||559ebab4ca4787b9985d1f0c, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.740-0400 m30999| 2015-07-09T14:17:24.739-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:24.739-0400-559ebab4ca4787b9985d1f0d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465844739), what: "shardCollection", ns: "db74.coll74", details: { version: "1|0||559ebab4ca4787b9985d1f0c" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.793-0400 m30999| 2015-07-09T14:17:24.793-0400 I SHARDING [conn1] distributed lock 'db74.coll74/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.799-0400 m31100| 2015-07-09T14:17:24.799-0400 I INDEX [conn182] build index on: db74.coll74 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db74.coll74" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.800-0400 m31100| 2015-07-09T14:17:24.799-0400 I INDEX [conn182] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.805-0400 m31100| 2015-07-09T14:17:24.805-0400 I INDEX [conn182] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.814-0400 m31200| 2015-07-09T14:17:24.814-0400 I INDEX [conn35] build index on: db74.coll74 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db74.coll74" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.814-0400 m31200| 2015-07-09T14:17:24.814-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.828-0400 m31200| 2015-07-09T14:17:24.827-0400 I INDEX [conn35] build index done. scanned 0 total records. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.828-0400 m31101| 2015-07-09T14:17:24.827-0400 I INDEX [repl writer worker 2] build index on: db74.coll74 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db74.coll74" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.829-0400 m31101| 2015-07-09T14:17:24.828-0400 I INDEX [repl writer worker 2] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.829-0400 m31102| 2015-07-09T14:17:24.828-0400 I INDEX [repl writer worker 3] build index on: db74.coll74 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db74.coll74" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.829-0400 m31102| 2015-07-09T14:17:24.828-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.829-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.871-0400 m31202| 2015-07-09T14:17:24.870-0400 I INDEX [repl writer worker 10] build index on: db74.coll74 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db74.coll74" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.871-0400 m31202| 2015-07-09T14:17:24.870-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.872-0400 m31102| 2015-07-09T14:17:24.871-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.876-0400 m31201| 2015-07-09T14:17:24.876-0400 I INDEX [repl writer worker 6] build index on: db74.coll74 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db74.coll74" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.877-0400 m31201| 2015-07-09T14:17:24.876-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.900-0400 m31201| 2015-07-09T14:17:24.899-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.903-0400 m31202| 2015-07-09T14:17:24.902-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.943-0400 m31101| 2015-07-09T14:17:24.936-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.950-0400 m30998| 2015-07-09T14:17:24.948-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64084 #474 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.964-0400 m30999| 2015-07-09T14:17:24.963-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64085 #475 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.965-0400 m30999| 2015-07-09T14:17:24.965-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64086 #476 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.965-0400 m30998| 2015-07-09T14:17:24.965-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64087 #475 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.975-0400 m30998| 2015-07-09T14:17:24.975-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64088 #476 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.979-0400 m30998| 2015-07-09T14:17:24.979-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64089 #477 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.984-0400 m30999| 2015-07-09T14:17:24.984-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64091 #477 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.988-0400 m30998| 2015-07-09T14:17:24.984-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64090 #478 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.988-0400 m30999| 2015-07-09T14:17:24.987-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64092 #478 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:24.992-0400 m30999| 2015-07-09T14:17:24.991-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64093 #479 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.006-0400 setting random seed: 6601529698818
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.006-0400 setting random seed: 7167467246763
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.006-0400 setting random seed: 2978139822371
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.008-0400 setting random seed: 3596017900854
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.010-0400 setting random seed: 1909699714742
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.016-0400 setting random seed: 3622072446160
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.016-0400 setting random seed: 2688342840410
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.016-0400 setting random seed: 6220453823916
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.018-0400 setting random seed: 6171630956232
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.018-0400 setting random seed: 6323120621964
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.025-0400 m30998| 2015-07-09T14:17:25.024-0400 I SHARDING [conn478] ChunkManager: time to load chunks for db74.coll74: 0ms sequenceNumber: 91 version: 1|0||559ebab4ca4787b9985d1f0c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.129-0400 m31100| 2015-07-09T14:17:25.129-0400 I COMMAND [conn144] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.130-0400 m31100| 2015-07-09T14:17:25.129-0400 I SHARDING [conn38] request split points lookup for chunk db74.coll74 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.131-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.131-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.132-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.132-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.132-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.132-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.132-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.132-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.132-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.133-0400 m31100| 2015-07-09T14:17:25.131-0400 W SHARDING [conn38] possible low cardinality key detected in db74.coll74 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.133-0400 m31100| 2015-07-09T14:17:25.132-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.135-0400 m31100| 2015-07-09T14:17:25.135-0400 I SHARDING [conn38] distributed lock 'db74.coll74/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebab5792e00bb67274ae6
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.135-0400 m31100| 2015-07-09T14:17:25.135-0400 I SHARDING [conn38] remotely refreshing metadata for db74.coll74 based on current shard version 1|0||559ebab4ca4787b9985d1f0c, current metadata version is 1|0||559ebab4ca4787b9985d1f0c
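The ten workers then bulk-insert 100 documents each, which pushes the single chunk over the autosplit threshold: the shard primary (m31100) is asked for split points and warns that every distinct tid value is a possible low-cardinality key, since a key with only ten distinct values can never be split finer than one value per chunk. The "request split points lookup" corresponds to the splitVector command; a hedged sketch, run directly against the shard primary (field names as in the 3.x command, and the tiny maxChunkSizeBytes is only there to force split points to appear):

    // Sketch: ask the shard primary for candidate split points on { tid: 1 },
    // as the autosplit path does above.
    var shard = new Mongo("localhost:31100");    // test-rs0 primary in this run
    var res = shard.getDB("admin").runCommand({
        splitVector: "db74.coll74",
        keyPattern: { tid: 1.0 },
        min: { tid: MinKey },
        max: { tid: MaxKey },
        maxChunkSizeBytes: 1024 * 1024
    });
    printjson(res.splitKeys);    // e.g. [ { tid: 0 }, { tid: 2 }, ..., { tid: 9 } ]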
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.137-0400 m31100| 2015-07-09T14:17:25.137-0400 I COMMAND [conn26] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.139-0400 m31100| 2015-07-09T14:17:25.138-0400 I COMMAND [conn145] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 113ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.141-0400 m31100| 2015-07-09T14:17:25.140-0400 I COMMAND [conn23] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.150-0400 m31100| 2015-07-09T14:17:25.148-0400 I COMMAND [conn25] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 102ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.151-0400 m31100| 2015-07-09T14:17:25.148-0400 I SHARDING [conn38] metadata of collection db74.coll74 already up to date (shard version : 1|0||559ebab4ca4787b9985d1f0c, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.151-0400 m31100| 2015-07-09T14:17:25.148-0400 I SHARDING [conn34] request split points lookup for chunk db74.coll74 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.151-0400 m31100| 2015-07-09T14:17:25.148-0400 I SHARDING [conn38] splitChunk accepted at version 1|0||559ebab4ca4787b9985d1f0c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.151-0400 m31100| 2015-07-09T14:17:25.148-0400 I SHARDING [conn187] request split points lookup for chunk db74.coll74 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.151-0400 m31100| 2015-07-09T14:17:25.148-0400 I SHARDING [conn15] request split points lookup for chunk db74.coll74 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.151-0400 m31100| 2015-07-09T14:17:25.149-0400 W SHARDING [conn34] possible low cardinality key detected in db74.coll74 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.153-0400 m31100| 2015-07-09T14:17:25.150-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.154-0400 m31100| 2015-07-09T14:17:25.153-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.154-0400 m31100| 2015-07-09T14:17:25.153-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.159-0400 m31100| 2015-07-09T14:17:25.156-0400 I COMMAND [conn67] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 9173 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 122ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.160-0400 m31100| 2015-07-09T14:17:25.157-0400 I SHARDING [conn36] request split points lookup for chunk db74.coll74 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.161-0400 m31100| 2015-07-09T14:17:25.158-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 0.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.162-0400 m31100| 2015-07-09T14:17:25.158-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.162-0400 m31100| 2015-07-09T14:17:25.158-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 2.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.162-0400 m31100| 2015-07-09T14:17:25.158-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 3.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.163-0400 m31100| 2015-07-09T14:17:25.158-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 4.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.163-0400 m31100| 2015-07-09T14:17:25.158-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.163-0400 m31100| 2015-07-09T14:17:25.159-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 6.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.163-0400 m31100| 2015-07-09T14:17:25.159-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 7.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.163-0400 m31100| 2015-07-09T14:17:25.159-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 8.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.163-0400 m31100| 2015-07-09T14:17:25.159-0400 W SHARDING [conn36] possible low cardinality key detected in db74.coll74 - key is { tid: 9.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.164-0400 m30998| 2015-07-09T14:17:25.159-0400 I SHARDING [conn475] ChunkManager: time to load chunks for db74.coll74: 0ms sequenceNumber: 92 version: 1|10||559ebab4ca4787b9985d1f0c based on: 1|0||559ebab4ca4787b9985d1f0c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.164-0400 m31100| 2015-07-09T14:17:25.160-0400 W SHARDING [conn34] could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db74.coll74 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.166-0400 m30999| 2015-07-09T14:17:25.160-0400 W SHARDING [conn479] splitChunk failed - cmd: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.166-0400 m31100| 2015-07-09T14:17:25.160-0400 W SHARDING [conn15] could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db74.coll74 is taken.
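Several router connections race to split the same [MinKey, MaxKey) chunk at once; only conn38's splitChunk wins the per-collection distributed lock, the competing requests fail with code 125 ("could not acquire collection lock"), and the routers respond by refreshing their chunk cache to version 1|10. The failures are benign contention, not workload errors. Each step of the winning split is recorded in the config changelog, which can be read back through any mongos; a sketch (the "about to log metadata event" lines below are the corresponding inserts into config.changelog):

    // Sketch: list the multi-split entries the winning splitChunk records.
    var config = new Mongo("localhost:30999").getDB("config");
    config.changelog.find({ ns: "db74.coll74", what: "multi-split" })
          .sort({ time: 1 })
          .forEach(function(ev) { printjson(ev.details.chunk); });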
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.166-0400 m30999| 2015-07-09T14:17:25.160-0400 W SHARDING [conn477] splitChunk failed - cmd: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 4.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.166-0400 m31100| 2015-07-09T14:17:25.160-0400 W SHARDING [conn187] could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db74.coll74 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.167-0400 m30999| 2015-07-09T14:17:25.161-0400 W SHARDING [conn476] splitChunk failed - cmd: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 3.0 }, { tid: 5.0 }, { tid: 8.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.168-0400 m31100| 2015-07-09T14:17:25.165-0400 I COMMAND [conn66] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 9168 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 127ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.168-0400 m30999| 2015-07-09T14:17:25.167-0400 I SHARDING [conn479] ChunkManager: time to load chunks for db74.coll74: 0ms sequenceNumber: 322 version: 1|10||559ebab4ca4787b9985d1f0c based on: 1|0||559ebab4ca4787b9985d1f0c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.173-0400 m31100| 2015-07-09T14:17:25.171-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.171-0400-559ebab5792e00bb67274ae7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845171), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 1, of: 10, chunk: { min: { tid: MinKey }, max: { tid: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.177-0400 m31100| 2015-07-09T14:17:25.172-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.178-0400 m30998| 2015-07-09T14:17:25.174-0400 W SHARDING [conn474] splitChunk failed - cmd: { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.178-0400 m31100| 2015-07-09T14:17:25.174-0400 W SHARDING [conn36] could not acquire collection lock for db74.coll74 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db74.coll74 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.224-0400 m31100| 2015-07-09T14:17:25.224-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.224-0400-559ebab5792e00bb67274ae8", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845224), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 2, of: 10, chunk: { min: { tid: 0.0 }, max: { tid: 2.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.264-0400 m31100| 2015-07-09T14:17:25.263-0400 I COMMAND [conn25] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 1329 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.267-0400 m31100| 2015-07-09T14:17:25.266-0400 I COMMAND [conn70] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 107, w: 107 } }, Database: { acquireCount: { w: 107 } }, Collection: { acquireCount: { w: 7 }, acquireWaitCount: { w: 2 }, timeAcquiringMicros: { w: 16110 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 139ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.270-0400 m31100| 2015-07-09T14:17:25.269-0400 I COMMAND [conn26] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|0, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 }, acquireWaitCount: { w: 1 }, timeAcquiringMicros: { w: 1247 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 113ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.280-0400 m31100| 2015-07-09T14:17:25.279-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.279-0400-559ebab5792e00bb67274ae9", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845279), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 3, of: 10, chunk: { min: { tid: 2.0 }, max: { tid: 3.0 }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.288-0400 m31100| 2015-07-09T14:17:25.287-0400 I COMMAND [conn23] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 103ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.290-0400 m31100| 2015-07-09T14:17:25.289-0400 I COMMAND [conn68] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.294-0400 m31100| 2015-07-09T14:17:25.293-0400 I COMMAND [conn66] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 113ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.299-0400 m31100| 2015-07-09T14:17:25.298-0400 I COMMAND [conn145] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.300-0400 m31100| 2015-07-09T14:17:25.299-0400 I COMMAND [conn133] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 106ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.332-0400 m31100| 2015-07-09T14:17:25.331-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.331-0400-559ebab5792e00bb67274aea", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845331), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 4, of: 10, chunk: { min: { tid: 3.0 }, max: { tid: 4.0 }, lastmod: Timestamp 1000|4, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.385-0400 m31100| 2015-07-09T14:17:25.384-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.384-0400-559ebab5792e00bb67274aeb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845384), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 5, of: 10, chunk: { min: { tid: 4.0 }, max: { tid: 5.0 }, lastmod: Timestamp 1000|5, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.387-0400 m31100| 2015-07-09T14:17:25.386-0400 I COMMAND [conn67] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 118ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.396-0400 m31100| 2015-07-09T14:17:25.395-0400 I COMMAND [conn26] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 109ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.399-0400 m31100| 2015-07-09T14:17:25.398-0400 I COMMAND [conn25] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 111ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.401-0400 m30998| 2015-07-09T14:17:25.401-0400 I NETWORK [conn474] end connection 127.0.0.1:64084 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.405-0400 m31100| 2015-07-09T14:17:25.405-0400 I COMMAND [conn68] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.409-0400 m31100| 2015-07-09T14:17:25.408-0400 I COMMAND [conn133] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 101ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.409-0400 m30999| 2015-07-09T14:17:25.409-0400 I NETWORK [conn477] end connection 127.0.0.1:64091 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.410-0400 m31100| 2015-07-09T14:17:25.410-0400 I COMMAND [conn23] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.417-0400 m30999| 2015-07-09T14:17:25.415-0400 I NETWORK [conn476] end connection 127.0.0.1:64086 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.433-0400 m31100| 2015-07-09T14:17:25.432-0400 I COMMAND [conn66] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 124ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.436-0400 m30998| 2015-07-09T14:17:25.436-0400 I NETWORK [conn476] end connection 127.0.0.1:64088 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.439-0400 m31100| 2015-07-09T14:17:25.438-0400 I COMMAND [conn145] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 106, w: 106 } }, Database: { acquireCount: { w: 106 } }, Collection: { acquireCount: { w: 6 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 136ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.441-0400 m31100| 2015-07-09T14:17:25.440-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.440-0400-559ebab5792e00bb67274aec", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845440), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 6, of: 10, chunk: { min: { tid: 5.0 }, max: { tid: 6.0 }, lastmod: Timestamp 1000|6, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.442-0400 m30998| 2015-07-09T14:17:25.441-0400 I NETWORK [conn475] end connection 127.0.0.1:64087 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.493-0400 m31100| 2015-07-09T14:17:25.492-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.492-0400-559ebab5792e00bb67274aed", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845492), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 7, of: 10, chunk: { min: { tid: 6.0 }, max: { tid: 7.0 }, lastmod: Timestamp 1000|7, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.495-0400 m31100| 2015-07-09T14:17:25.495-0400 I COMMAND [conn70] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 105ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.503-0400 m30999| 2015-07-09T14:17:25.503-0400 I NETWORK [conn475] end connection 127.0.0.1:64085 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.512-0400 m31100| 2015-07-09T14:17:25.511-0400 I COMMAND [conn25] command db74.$cmd command: insert { insert: "coll74", documents: 100, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 1000|10, ObjectId('559ebab4ca4787b9985d1f0c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 105, w: 105 } }, Database: { acquireCount: { w: 105 } }, Collection: { acquireCount: { w: 5 } }, Metadata: { acquireCount: { w: 100 } }, oplog: { acquireCount: { w: 100 } } } protocol:op_command 110ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.528-0400 m30999| 2015-07-09T14:17:25.528-0400 I NETWORK [conn479] end connection 127.0.0.1:64093 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.545-0400 m31100| 2015-07-09T14:17:25.544-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.544-0400-559ebab5792e00bb67274aee", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845544), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 8, of: 10, chunk: { min: { tid: 7.0 }, max: { tid: 8.0 }, lastmod: Timestamp 1000|8, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.554-0400 m30998| 2015-07-09T14:17:25.554-0400 I NETWORK [conn478] end connection 127.0.0.1:64090 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.586-0400 m30998| 2015-07-09T14:17:25.586-0400 I NETWORK [conn477] end connection 127.0.0.1:64089 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.598-0400 m31100| 2015-07-09T14:17:25.597-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.597-0400-559ebab5792e00bb67274aef", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845597), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 9, of: 10, chunk: { min: { tid: 8.0 }, max: { tid: 9.0 }, lastmod: Timestamp 1000|9, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.651-0400 m31100| 2015-07-09T14:17:25.650-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.650-0400-559ebab5792e00bb67274af0", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465845650), what: "multi-split", ns: "db74.coll74", details: { before: { min: { tid: MinKey }, max: { tid: MaxKey } }, number: 10, of: 10, chunk: { min: { tid: 9.0 }, max: { tid: MaxKey }, lastmod: Timestamp 1000|10, lastmodEpoch: ObjectId('559ebab4ca4787b9985d1f0c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.705-0400 m31100| 2015-07-09T14:17:25.705-0400 I SHARDING [conn38] distributed lock 'db74.coll74/bs-osx108-8:31100:1436464536:197041335' unlocked.
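With the tenth multi-split logged and the distributed lock released, the original [MinKey, MaxKey) chunk has become ten chunks bounded at tid 0 through 9. The resulting ranges live in config.chunks, the routing table the ChunkManager reloads above (sequenceNumber 322, version 1|10); a sketch of inspecting them:

    // Sketch: list the ten chunks produced by the autosplit.
    var config = new Mongo("localhost:30999").getDB("config");
    config.chunks.find({ ns: "db74.coll74" }, { _id: 0, min: 1, max: 1, shard: 1 })
          .sort({ min: 1 })
          .forEach(printjson);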
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.706-0400 m31100| 2015-07-09T14:17:25.705-0400 I COMMAND [conn38] command db74.coll74 command: splitChunk { splitChunk: "db74.coll74", keyPattern: { tid: 1.0 }, min: { tid: MinKey }, max: { tid: MaxKey }, from: "test-rs0", splitKeys: [ { tid: 0.0 }, { tid: 2.0 }, { tid: 3.0 }, { tid: 4.0 }, { tid: 5.0 }, { tid: 6.0 }, { tid: 7.0 }, { tid: 8.0 }, { tid: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab4ca4787b9985d1f0c') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 24499 } } } protocol:op_command 572ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.707-0400 m30999| 2015-07-09T14:17:25.707-0400 I SHARDING [conn478] autosplitted db74.coll74 shard: ns: db74.coll74, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { tid: MinKey }, max: { tid: MaxKey } into 10 (splitThreshold 921)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.763-0400 m30999| 2015-07-09T14:17:25.763-0400 I NETWORK [conn478] end connection 127.0.0.1:64092 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.781-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400 jstests/concurrency/fsm_workloads/touch_no_data_no_index.js: Workload completed in 953 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400 m30999| 2015-07-09T14:17:25.782-0400 I COMMAND [conn1] DROP: db74.coll74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.782-0400 m30999| 2015-07-09T14:17:25.782-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.782-0400-559ebab5ca4787b9985d1f0e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465845782), what: "dropCollection.start", ns: "db74.coll74", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.838-0400 m30999| 2015-07-09T14:17:25.838-0400 I SHARDING [conn1] distributed lock 'db74.coll74/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab5ca4787b9985d1f0f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.839-0400 m31100| 2015-07-09T14:17:25.839-0400 I COMMAND [conn38] CMD: drop db74.coll74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.843-0400 m31200| 2015-07-09T14:17:25.842-0400 I COMMAND [conn18] CMD: drop db74.coll74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.843-0400 m31101| 2015-07-09T14:17:25.843-0400 I COMMAND [repl writer worker 0] CMD: drop db74.coll74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.844-0400 m31102| 2015-07-09T14:17:25.843-0400 I COMMAND [repl writer worker 9] CMD: drop db74.coll74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.846-0400 m31202| 2015-07-09T14:17:25.846-0400 I COMMAND [repl writer worker 1] CMD: drop db74.coll74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.847-0400 m31201| 2015-07-09T14:17:25.846-0400 I COMMAND [repl writer worker 7] CMD: drop db74.coll74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.901-0400 m31100| 2015-07-09T14:17:25.900-0400 I SHARDING [conn38] remotely refreshing metadata for db74.coll74 with requested shard version 0|0||000000000000000000000000, current shard version is 1|10||559ebab4ca4787b9985d1f0c, current metadata version is 1|10||559ebab4ca4787b9985d1f0c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.903-0400 m31100| 2015-07-09T14:17:25.902-0400 W SHARDING [conn38] no chunks found when reloading db74.coll74, previous version was 0|0||559ebab4ca4787b9985d1f0c, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.903-0400 m31100| 2015-07-09T14:17:25.902-0400 I SHARDING [conn38] dropping metadata for db74.coll74 at shard version 1|10||559ebab4ca4787b9985d1f0c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.905-0400 m30999| 2015-07-09T14:17:25.905-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:25.905-0400-559ebab5ca4787b9985d1f10", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465845905), what: "dropCollection", ns: "db74.coll74", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:25.960-0400 m30999| 2015-07-09T14:17:25.959-0400 I SHARDING [conn1] distributed lock 'db74.coll74/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.015-0400 m30999| 2015-07-09T14:17:26.015-0400 I COMMAND [conn1] DROP DATABASE: db74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.016-0400 m30999| 2015-07-09T14:17:26.015-0400 I SHARDING [conn1] DBConfig::dropDatabase: db74
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.016-0400 m30999| 2015-07-09T14:17:26.015-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.015-0400-559ebab6ca4787b9985d1f11", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465846015), what: "dropDatabase.start", ns: "db74", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.122-0400 m30999| 2015-07-09T14:17:26.122-0400 I SHARDING [conn1] DBConfig::dropDatabase: db74 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.122-0400 m31100| 2015-07-09T14:17:26.122-0400 I COMMAND [conn160] dropDatabase db74 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.122-0400 m31100| 2015-07-09T14:17:26.122-0400 I COMMAND [conn160] dropDatabase db74 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.123-0400 m30999| 2015-07-09T14:17:26.122-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.122-0400-559ebab6ca4787b9985d1f12", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465846122), what: "dropDatabase", ns: "db74", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.123-0400 m31101| 2015-07-09T14:17:26.123-0400 I COMMAND [repl writer worker 12] dropDatabase db74 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.123-0400 m31101| 2015-07-09T14:17:26.123-0400 I COMMAND [repl writer worker 12] dropDatabase db74 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.124-0400 m31102| 2015-07-09T14:17:26.123-0400 I COMMAND [repl writer worker 14] dropDatabase db74 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.124-0400 m31102| 2015-07-09T14:17:26.123-0400 I COMMAND [repl writer worker 14] dropDatabase db74 finished
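Teardown then drops the sharded collection and its database through mongos: the router takes the distributed lock, issues the drop on both shards (the secondaries replay it), the shard discards its cached chunk metadata ("no chunks found ... this is a drop"), and dropDatabase follows. As a sketch, the whole sequence reduces to two shell calls against mongos; the distributed locking, changelog events, and per-shard drops above all happen server-side:

    // Sketch of the teardown seen above.
    var db74 = new Mongo("localhost:30999").getDB("db74");
    db74.coll74.drop();     // "DROP: db74.coll74" + dropCollection events
    db74.dropDatabase();    // "DROP DATABASE: db74" + dropDatabase events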
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.210-0400 m31100| 2015-07-09T14:17:26.210-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.213-0400 m31101| 2015-07-09T14:17:26.213-0400 I COMMAND [repl writer worker 1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.214-0400 m31102| 2015-07-09T14:17:26.214-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.239-0400 m31200| 2015-07-09T14:17:26.239-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.241-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.241-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.241-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.241-0400 jstests/concurrency/fsm_workloads/update_array.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.241-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.241-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.241-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.243-0400 m31201| 2015-07-09T14:17:26.242-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.243-0400 m31202| 2015-07-09T14:17:26.242-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.246-0400 m30999| 2015-07-09T14:17:26.245-0400 I SHARDING [conn1] distributed lock 'db75/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab6ca4787b9985d1f13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.247-0400 m30999| 2015-07-09T14:17:26.247-0400 I SHARDING [conn1] Placing [db75] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.248-0400 m30999| 2015-07-09T14:17:26.247-0400 I SHARDING [conn1] Enabling sharding for database [db75] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.300-0400 m30999| 2015-07-09T14:17:26.300-0400 I SHARDING [conn1] distributed lock 'db75/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.322-0400 m31100| 2015-07-09T14:17:26.320-0400 I INDEX [conn145] build index on: db75.coll75 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db75.coll75" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.322-0400 m31100| 2015-07-09T14:17:26.320-0400 I INDEX [conn145] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.329-0400 m31100| 2015-07-09T14:17:26.329-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.332-0400 m30999| 2015-07-09T14:17:26.332-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db75.coll75", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.335-0400 m30999| 2015-07-09T14:17:26.335-0400 I SHARDING [conn1] distributed lock 'db75.coll75/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab6ca4787b9985d1f14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.337-0400 m30999| 2015-07-09T14:17:26.336-0400 I SHARDING [conn1] enable sharding on: db75.coll75 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.338-0400 m30999| 2015-07-09T14:17:26.336-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.336-0400-559ebab6ca4787b9985d1f15", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465846336), what: "shardCollection.start", ns: "db75.coll75", details: { shardKey: { _id: "hashed" }, collection: "db75.coll75", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.346-0400 m31102| 2015-07-09T14:17:26.346-0400 I INDEX [repl writer worker 13] build index on: db75.coll75 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db75.coll75" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.347-0400 m31102| 2015-07-09T14:17:26.346-0400 I INDEX [repl writer worker 13] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.347-0400 m31101| 2015-07-09T14:17:26.346-0400 I INDEX [repl writer worker 4] build index on: db75.coll75 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db75.coll75" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.347-0400 m31101| 2015-07-09T14:17:26.346-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.354-0400 m31102| 2015-07-09T14:17:26.353-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.357-0400 m31101| 2015-07-09T14:17:26.356-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.390-0400 m30999| 2015-07-09T14:17:26.390-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db75.coll75 using new epoch 559ebab6ca4787b9985d1f16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.498-0400 m30999| 2015-07-09T14:17:26.498-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db75.coll75: 0ms sequenceNumber: 323 version: 1|1||559ebab6ca4787b9985d1f16 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.555-0400 m30999| 2015-07-09T14:17:26.554-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db75.coll75: 0ms sequenceNumber: 324 version: 1|1||559ebab6ca4787b9985d1f16 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.557-0400 m31100| 2015-07-09T14:17:26.557-0400 I SHARDING [conn175] remotely refreshing metadata for db75.coll75 with requested shard version 1|1||559ebab6ca4787b9985d1f16, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.559-0400 m31100| 2015-07-09T14:17:26.558-0400 I SHARDING [conn175] collection db75.coll75 was previously unsharded, new metadata loaded with shard version 1|1||559ebab6ca4787b9985d1f16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.559-0400 m31100| 2015-07-09T14:17:26.558-0400 I SHARDING [conn175] collection version was loaded at version 1|1||559ebab6ca4787b9985d1f16, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.559-0400 m30999| 2015-07-09T14:17:26.559-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.559-0400-559ebab6ca4787b9985d1f17", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465846559), what: "shardCollection", ns: "db75.coll75", details: { version: "1|1||559ebab6ca4787b9985d1f16" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.613-0400 m30999| 2015-07-09T14:17:26.613-0400 I SHARDING [conn1] distributed lock 'db75.coll75/bs-osx108-8:30999:1436464534:16807' unlocked.
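The next workload shards db75.coll75 on a hashed _id key. Unlike the range-sharded case above, a hashed key on an empty collection lets mongos pre-create chunks (numChunks: 2 here) so they can be spread across the shards immediately, which is why a moveChunk to test-rs1 follows. A sketch of the equivalent command; numInitialChunks is the shardCollection option that controls the pre-split count, and the value 2 merely mirrors what this run chose by default:

    // Sketch: hashed sharding with pre-split chunks, through mongos.
    var admin = new Mongo("localhost:30999").getDB("admin");
    assert.commandWorked(admin.runCommand({
        shardcollection: "db75.coll75",
        key: { _id: "hashed" },
        numInitialChunks: 2
    }));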
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.614-0400 m30999| 2015-07-09T14:17:26.614-0400 I SHARDING [conn1] moving chunk ns: db75.coll75 moving ( ns: db75.coll75, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.615-0400 m31100| 2015-07-09T14:17:26.614-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.616-0400 m31100| 2015-07-09T14:17:26.615-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebab6ca4787b9985d1f16') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.620-0400 m31100| 2015-07-09T14:17:26.619-0400 I SHARDING [conn38] distributed lock 'db75.coll75/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebab6792e00bb67274af2 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.620-0400 m31100| 2015-07-09T14:17:26.619-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.619-0400-559ebab6792e00bb67274af3", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465846619), what: "moveChunk.start", ns: "db75.coll75", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.674-0400 m31100| 2015-07-09T14:17:26.673-0400 I SHARDING [conn38] remotely refreshing metadata for db75.coll75 based on current shard version 1|1||559ebab6ca4787b9985d1f16, current metadata version is 1|1||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.675-0400 m31100| 2015-07-09T14:17:26.675-0400 I SHARDING [conn38] metadata of collection db75.coll75 already up to date (shard version : 1|1||559ebab6ca4787b9985d1f16, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.675-0400 m31100| 2015-07-09T14:17:26.675-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.676-0400 m31100| 2015-07-09T14:17:26.675-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.676-0400 m31200| 2015-07-09T14:17:26.675-0400 I SHARDING [conn16] remotely refreshing metadata for db75.coll75, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.678-0400 m31200| 2015-07-09T14:17:26.678-0400 I SHARDING [conn16] collection db75.coll75 was previously unsharded, new metadata loaded with shard version 0|0||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.678-0400 m31200| 2015-07-09T14:17:26.678-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebab6ca4787b9985d1f16, took 2ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.679-0400 m31200| 2015-07-09T14:17:26.678-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db75.coll75 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.681-0400 m31100| 2015-07-09T14:17:26.681-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.685-0400 m31100| 2015-07-09T14:17:26.684-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.690-0400 m31100| 2015-07-09T14:17:26.690-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.692-0400 m31200| 2015-07-09T14:17:26.692-0400 I INDEX [migrateThread] build index on: db75.coll75 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.692-0400 m31200| 2015-07-09T14:17:26.692-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.700-0400 m31100| 2015-07-09T14:17:26.700-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.701-0400 m31200| 2015-07-09T14:17:26.700-0400 I INDEX [migrateThread] build index on: db75.coll75 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.701-0400 m31200| 2015-07-09T14:17:26.700-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.714-0400 m31200| 2015-07-09T14:17:26.713-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.714-0400 m31200| 2015-07-09T14:17:26.714-0400 I SHARDING [migrateThread] Deleter starting delete for: db75.coll75 from { _id: 0 } -> { _id: MaxKey }, with opId: 101569 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.714-0400 m31200| 2015-07-09T14:17:26.714-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db75.coll75 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.717-0400 m31100| 2015-07-09T14:17:26.717-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.721-0400 m31201| 2015-07-09T14:17:26.720-0400 I INDEX [repl writer worker 11] build index on: db75.coll75 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.721-0400 m31201| 2015-07-09T14:17:26.720-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.725-0400 m31202| 2015-07-09T14:17:26.725-0400 I INDEX [repl writer worker 13] build index on: db75.coll75 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.725-0400 m31202| 2015-07-09T14:17:26.725-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.726-0400 m31201| 2015-07-09T14:17:26.725-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.727-0400 m31200| 2015-07-09T14:17:26.727-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.728-0400 m31200| 2015-07-09T14:17:26.727-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db75.coll75' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.730-0400 m31202| 2015-07-09T14:17:26.730-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.752-0400 m31100| 2015-07-09T14:17:26.751-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.752-0400 m31100| 2015-07-09T14:17:26.751-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.752-0400 m31100| 2015-07-09T14:17:26.752-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.753-0400 m31100| 2015-07-09T14:17:26.752-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.763-0400 m31200| 2015-07-09T14:17:26.763-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db75.coll75' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.764-0400 m31200| 2015-07-09T14:17:26.763-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.763-0400-559ebab6d5a107a5b9c0db79", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465846763), what: "moveChunk.to", ns: "db75.coll75", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 35, step 2 of 5: 11, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 35, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.818-0400 m31100| 2015-07-09T14:17:26.817-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.818-0400 m31100| 2015-07-09T14:17:26.817-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebab6ca4787b9985d1f16 through { _id: MinKey } -> { _id: 0 } for collection 'db75.coll75' [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.820-0400 m31100| 2015-07-09T14:17:26.819-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.819-0400-559ebab6792e00bb67274af4", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465846819), what: "moveChunk.commit", ns: "db75.coll75", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.874-0400 m31100| 2015-07-09T14:17:26.873-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.874-0400 m31100| 2015-07-09T14:17:26.873-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.874-0400 m31100| 2015-07-09T14:17:26.873-0400 I SHARDING [conn38] Deleter starting delete for: db75.coll75 from { _id: 0 } -> { _id: MaxKey }, with opId: 240193 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:26.874-0400 m31100| 2015-07-09T14:17:26.873-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db75.coll75 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.874-0400 m31100| 2015-07-09T14:17:26.873-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.875-0400 m31100| 2015-07-09T14:17:26.875-0400 I SHARDING [conn38] distributed lock 'db75.coll75/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.876-0400 m31100| 2015-07-09T14:17:26.875-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.875-0400-559ebab6792e00bb67274af5", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465846875), what: "moveChunk.from", ns: "db75.coll75", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 4, step 4 of 6: 72, step 5 of 6: 121, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.929-0400 m31100| 2015-07-09T14:17:26.928-0400 I COMMAND [conn38] command db75.coll75 command: moveChunk { moveChunk: "db75.coll75", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebab6ca4787b9985d1f16') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 313ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.931-0400 m30999| 2015-07-09T14:17:26.931-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db75.coll75: 0ms sequenceNumber: 325 version: 2|1||559ebab6ca4787b9985d1f16 based on: 1|1||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.932-0400 m31100| 2015-07-09T14:17:26.932-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db75.coll75", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab6ca4787b9985d1f16') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.937-0400 m31100| 2015-07-09T14:17:26.937-0400 I SHARDING [conn38] distributed lock 'db75.coll75/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebab6792e00bb67274af6 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.937-0400 m31100| 2015-07-09T14:17:26.937-0400 I SHARDING [conn38] remotely refreshing metadata for db75.coll75 based on current shard version 2|0||559ebab6ca4787b9985d1f16, current metadata version is 2|0||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.939-0400 m31100| 2015-07-09T14:17:26.938-0400 I SHARDING [conn38] updating metadata for db75.coll75 from shard version 2|0||559ebab6ca4787b9985d1f16 to shard version 2|1||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.939-0400 m31100| 2015-07-09T14:17:26.938-0400 I 
SHARDING [conn38] collection version was loaded at version 2|1||559ebab6ca4787b9985d1f16, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.939-0400 m31100| 2015-07-09T14:17:26.938-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.940-0400 m31100| 2015-07-09T14:17:26.940-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:26.940-0400-559ebab6792e00bb67274af7", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465846940), what: "split", ns: "db75.coll75", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebab6ca4787b9985d1f16') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebab6ca4787b9985d1f16') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.996-0400 m31100| 2015-07-09T14:17:26.996-0400 I SHARDING [conn38] distributed lock 'db75.coll75/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:26.999-0400 m30999| 2015-07-09T14:17:26.998-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db75.coll75: 0ms sequenceNumber: 326 version: 2|3||559ebab6ca4787b9985d1f16 based on: 2|1||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.000-0400 m31200| 2015-07-09T14:17:26.999-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db75.coll75", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab6ca4787b9985d1f16') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.004-0400 m31200| 2015-07-09T14:17:27.004-0400 I SHARDING [conn18] distributed lock 'db75.coll75/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebab7d5a107a5b9c0db7a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.004-0400 m31200| 2015-07-09T14:17:27.004-0400 I SHARDING [conn18] remotely refreshing metadata for db75.coll75 based on current shard version 0|0||559ebab6ca4787b9985d1f16, current metadata version is 1|1||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.006-0400 m31200| 2015-07-09T14:17:27.005-0400 I SHARDING [conn18] updating metadata for db75.coll75 from shard version 0|0||559ebab6ca4787b9985d1f16 to shard version 2|0||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.006-0400 m31200| 2015-07-09T14:17:27.006-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559ebab6ca4787b9985d1f16, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.006-0400 m31200| 2015-07-09T14:17:27.006-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.007-0400 m31200| 2015-07-09T14:17:27.006-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:27.006-0400-559ebab7d5a107a5b9c0db7b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465847006), what: "split", ns: "db75.coll75", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559ebab6ca4787b9985d1f16') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebab6ca4787b9985d1f16') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.061-0400 m31200| 2015-07-09T14:17:27.060-0400 I SHARDING [conn18] distributed lock 'db75.coll75/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.063-0400 m30999| 2015-07-09T14:17:27.062-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db75.coll75: 0ms sequenceNumber: 327 version: 2|5||559ebab6ca4787b9985d1f16 based on: 2|3||559ebab6ca4787b9985d1f16 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.071-0400 m31100| 2015-07-09T14:17:27.070-0400 I INDEX [conn175] build index on: db75.coll75 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.072-0400 m31100| 2015-07-09T14:17:27.071-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.072-0400 m31200| 2015-07-09T14:17:27.072-0400 I INDEX [conn35] build index on: db75.coll75 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.073-0400 m31200| 2015-07-09T14:17:27.072-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.077-0400 m31100| 2015-07-09T14:17:27.077-0400 I INDEX [conn175] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.081-0400 m31200| 2015-07-09T14:17:27.081-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.083-0400 m31102| 2015-07-09T14:17:27.083-0400 I INDEX [repl writer worker 8] build index on: db75.coll75 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.084-0400 m31102| 2015-07-09T14:17:27.083-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.084-0400 m31101| 2015-07-09T14:17:27.084-0400 I INDEX [repl writer worker 15] build index on: db75.coll75 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.084-0400 m31101| 2015-07-09T14:17:27.084-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.097-0400 m31101| 2015-07-09T14:17:27.097-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.098-0400 m31102| 2015-07-09T14:17:27.097-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.098-0400 m31202| 2015-07-09T14:17:27.097-0400 I INDEX [repl writer worker 0] build index on: db75.coll75 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.099-0400 m31202| 2015-07-09T14:17:27.097-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.103-0400 m31201| 2015-07-09T14:17:27.102-0400 I INDEX [repl writer worker 12] build index on: db75.coll75 properties: { v: 1, key: { arr: 1.0 }, name: "arr_1", ns: "db75.coll75" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.103-0400 m31201| 2015-07-09T14:17:27.102-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.105-0400 m31202| 2015-07-09T14:17:27.105-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.110-0400 m31201| 2015-07-09T14:17:27.110-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.118-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.183-0400 m30998| 2015-07-09T14:17:27.182-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64094 #479 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.192-0400 m30998| 2015-07-09T14:17:27.184-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64095 #480 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.199-0400 m30998| 2015-07-09T14:17:27.198-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64096 #481 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.204-0400 m30999| 2015-07-09T14:17:27.204-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64097 #480 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.205-0400 m30999| 2015-07-09T14:17:27.204-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64098 #481 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.210-0400 setting random seed: 4723155996762 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.210-0400 setting random seed: 2047786652110 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.211-0400 setting random seed: 2425301824696 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.211-0400 setting random seed: 7150278044864 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.211-0400 setting random seed: 8272887798957 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.222-0400 m30998| 2015-07-09T14:17:27.215-0400 I SHARDING [conn480] ChunkManager: time to load chunks for db75.coll75: 0ms sequenceNumber: 93 version: 2|5||559ebab6ca4787b9985d1f16 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.258-0400 m30998| 2015-07-09T14:17:27.258-0400 I NETWORK [conn480] end connection 127.0.0.1:64095 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.266-0400 m30999| 2015-07-09T14:17:27.265-0400 I NETWORK [conn481] end connection 127.0.0.1:64098 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.280-0400 m30999| 2015-07-09T14:17:27.279-0400 I NETWORK [conn480] end connection 
127.0.0.1:64097 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.296-0400 m30998| 2015-07-09T14:17:27.296-0400 I NETWORK [conn481] end connection 127.0.0.1:64096 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.297-0400 m30998| 2015-07-09T14:17:27.297-0400 I NETWORK [conn479] end connection 127.0.0.1:64094 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.329-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.329-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.329-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.329-0400 jstests/concurrency/fsm_workloads/update_array.js: Workload completed in 212 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.330-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.330-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.330-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.330-0400 m30999| 2015-07-09T14:17:27.330-0400 I COMMAND [conn1] DROP: db75.coll75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.330-0400 m30999| 2015-07-09T14:17:27.330-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:27.330-0400-559ebab7ca4787b9985d1f18", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465847330), what: "dropCollection.start", ns: "db75.coll75", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.387-0400 m30999| 2015-07-09T14:17:27.386-0400 I SHARDING [conn1] distributed lock 'db75.coll75/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab7ca4787b9985d1f19
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.388-0400 m31100| 2015-07-09T14:17:27.388-0400 I COMMAND [conn38] CMD: drop db75.coll75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.390-0400 m31200| 2015-07-09T14:17:27.390-0400 I COMMAND [conn18] CMD: drop db75.coll75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.392-0400 m31102| 2015-07-09T14:17:27.392-0400 I COMMAND [repl writer worker 8] CMD: drop db75.coll75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.393-0400 m31101| 2015-07-09T14:17:27.392-0400 I COMMAND [repl writer worker 5] CMD: drop db75.coll75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.394-0400 m31201| 2015-07-09T14:17:27.394-0400 I COMMAND [repl writer worker 4] CMD: drop db75.coll75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.394-0400 m31202| 2015-07-09T14:17:27.394-0400 I COMMAND [repl writer worker 14] CMD: drop db75.coll75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.447-0400 m31100| 2015-07-09T14:17:27.446-0400 I SHARDING [conn38] remotely refreshing metadata for db75.coll75 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebab6ca4787b9985d1f16, current metadata version is 2|3||559ebab6ca4787b9985d1f16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.449-0400 m31100| 2015-07-09T14:17:27.448-0400 W SHARDING [conn38] no chunks found when reloading db75.coll75, previous version was 0|0||559ebab6ca4787b9985d1f16, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.449-0400 m31100| 2015-07-09T14:17:27.448-0400 I SHARDING [conn38] dropping metadata for db75.coll75 at shard version 2|3||559ebab6ca4787b9985d1f16, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.450-0400 m31200| 2015-07-09T14:17:27.450-0400 I SHARDING [conn18] remotely refreshing metadata for db75.coll75 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebab6ca4787b9985d1f16, current metadata version is 2|5||559ebab6ca4787b9985d1f16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.452-0400 m31200| 2015-07-09T14:17:27.451-0400 W SHARDING [conn18] no chunks found when reloading db75.coll75, previous version was 0|0||559ebab6ca4787b9985d1f16, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.452-0400 m31200| 2015-07-09T14:17:27.452-0400 I SHARDING [conn18] dropping metadata for db75.coll75 at shard version 2|5||559ebab6ca4787b9985d1f16, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.453-0400 m30999| 2015-07-09T14:17:27.453-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:27.453-0400-559ebab7ca4787b9985d1f1a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465847453), what: "dropCollection", ns: "db75.coll75", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.508-0400 m30999| 2015-07-09T14:17:27.507-0400 I SHARDING [conn1] distributed lock 'db75.coll75/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.564-0400 m30999| 2015-07-09T14:17:27.563-0400 I COMMAND [conn1] DROP DATABASE: db75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.564-0400 m30999| 2015-07-09T14:17:27.563-0400 I SHARDING [conn1] DBConfig::dropDatabase: db75
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.564-0400 m30999| 2015-07-09T14:17:27.563-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:27.563-0400-559ebab7ca4787b9985d1f1b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465847563), what: "dropDatabase.start", ns: "db75", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.669-0400 m30999| 2015-07-09T14:17:27.669-0400 I SHARDING [conn1] DBConfig::dropDatabase: db75 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.669-0400 m31100| 2015-07-09T14:17:27.669-0400 I COMMAND [conn160] dropDatabase db75 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.670-0400 m31100| 2015-07-09T14:17:27.669-0400 I COMMAND [conn160] dropDatabase db75 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.671-0400 m30999| 2015-07-09T14:17:27.670-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:27.670-0400-559ebab7ca4787b9985d1f1c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465847670), what: "dropDatabase", ns: "db75", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.671-0400 m31101| 2015-07-09T14:17:27.671-0400 I COMMAND [repl writer worker 10] dropDatabase db75 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.671-0400 m31101| 2015-07-09T14:17:27.671-0400 I COMMAND [repl writer worker 10] dropDatabase db75 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.671-0400 m31102| 2015-07-09T14:17:27.671-0400 I COMMAND [repl writer worker 3] dropDatabase db75 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.672-0400 m31102| 2015-07-09T14:17:27.671-0400 I COMMAND [repl writer worker 3] dropDatabase db75 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.754-0400 m31100| 2015-07-09T14:17:27.753-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
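Above, the update_array.js teardown: mongos drops the sharded collection under the distributed lock (each shard primary runs "CMD: drop" and its secondaries replay it), both shards drop their cached metadata, and the whole db75 database is then dropped; the test.fsm_teardown drops continuing below belong to the same cleanup. A sketch of the equivalent shell calls through mongos (illustrative; names taken from the log):

    // Rough shell equivalent of the logged teardown:
    var workloadDB = db.getSiblingDB("db75");
    workloadDB.coll75.drop();   // "DROP: db75.coll75" -> per-shard "CMD: drop db75.coll75"
    workloadDB.dropDatabase();  // "DROP DATABASE: db75" -> per-member "dropDatabase db75 starting/finished"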
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.757-0400 m31101| 2015-07-09T14:17:27.757-0400 I COMMAND [repl writer worker 3] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.757-0400 m31102| 2015-07-09T14:17:27.757-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.795-0400 m31200| 2015-07-09T14:17:27.795-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.798-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.798-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.798-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.798-0400 jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.799-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.799-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.799-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.799-0400 m31202| 2015-07-09T14:17:27.799-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.799-0400 m31201| 2015-07-09T14:17:27.799-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.805-0400 m30999| 2015-07-09T14:17:27.805-0400 I SHARDING [conn1] distributed lock 'db76/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab7ca4787b9985d1f1d
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.809-0400 m30999| 2015-07-09T14:17:27.808-0400 I SHARDING [conn1] Placing [db76] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.809-0400 m30999| 2015-07-09T14:17:27.808-0400 I SHARDING [conn1] Enabling sharding for database [db76] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.862-0400 m30999| 2015-07-09T14:17:27.862-0400 I SHARDING [conn1] distributed lock 'db76/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.887-0400 m31100| 2015-07-09T14:17:27.886-0400 I INDEX [conn70] build index on: db76.coll76 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db76.coll76" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.887-0400 m31100| 2015-07-09T14:17:27.886-0400 I INDEX [conn70] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.894-0400 m31100| 2015-07-09T14:17:27.893-0400 I INDEX [conn70] build index done. scanned 0 total records.
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.895-0400 m30999| 2015-07-09T14:17:27.895-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db76.coll76", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.898-0400 m30999| 2015-07-09T14:17:27.898-0400 I SHARDING [conn1] distributed lock 'db76.coll76/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab7ca4787b9985d1f1e [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.900-0400 m30999| 2015-07-09T14:17:27.899-0400 I SHARDING [conn1] enable sharding on: db76.coll76 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.901-0400 m30999| 2015-07-09T14:17:27.900-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:27.899-0400-559ebab7ca4787b9985d1f1f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465847900), what: "shardCollection.start", ns: "db76.coll76", details: { shardKey: { _id: "hashed" }, collection: "db76.coll76", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.903-0400 m31102| 2015-07-09T14:17:27.903-0400 I INDEX [repl writer worker 1] build index on: db76.coll76 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.903-0400 m31102| 2015-07-09T14:17:27.903-0400 I INDEX [repl writer worker 1] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.904-0400 m31101| 2015-07-09T14:17:27.903-0400 I INDEX [repl writer worker 14] build index on: db76.coll76 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.904-0400 m31101| 2015-07-09T14:17:27.903-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.909-0400 m31101| 2015-07-09T14:17:27.908-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.909-0400 m31102| 2015-07-09T14:17:27.908-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:27.953-0400 m30999| 2015-07-09T14:17:27.953-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db76.coll76 using new epoch 559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.061-0400 m30999| 2015-07-09T14:17:28.060-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db76.coll76: 0ms sequenceNumber: 328 version: 1|1||559ebab7ca4787b9985d1f20 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.117-0400 m30999| 2015-07-09T14:17:28.117-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db76.coll76: 1ms sequenceNumber: 329 version: 1|1||559ebab7ca4787b9985d1f20 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.119-0400 m31100| 2015-07-09T14:17:28.118-0400 I SHARDING [conn175] remotely refreshing metadata for db76.coll76 with requested shard version 1|1||559ebab7ca4787b9985d1f20, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.121-0400 m31100| 2015-07-09T14:17:28.120-0400 I SHARDING [conn175] collection db76.coll76 was previously unsharded, new metadata loaded with shard version 1|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.121-0400 m31100| 2015-07-09T14:17:28.120-0400 I SHARDING [conn175] collection version was loaded at version 1|1||559ebab7ca4787b9985d1f20, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.121-0400 m30999| 2015-07-09T14:17:28.121-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:28.121-0400-559ebab8ca4787b9985d1f21", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465848121), what: "shardCollection", ns: "db76.coll76", details: { version: "1|1||559ebab7ca4787b9985d1f20" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.176-0400 m30999| 2015-07-09T14:17:28.175-0400 I SHARDING [conn1] distributed lock 'db76.coll76/bs-osx108-8:30999:1436464534:16807' unlocked. 
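The entries that follow trace a complete chunk migration, the same dance already run for db75.coll75 above: mongos asks test-rs0 to move the { _id: 0 } -> { _id: MaxKey } chunk to test-rs1; the donor polls the recipient ("ready" -> "steady"), enters the critical section, bumps the shard version to 2|0, commits, and, because the request carries waitForDelete: true, deletes the moved range inline before releasing the distributed lock. A hand-run equivalent of the logged request might look like the sketch below (illustrative only; the bounds form is used because the shard key is hashed, and _waitForDelete is, as an assumption about the client-facing option name in this release line, the shell spelling of the logged waitForDelete):

    // Rough equivalent of the moveChunk request logged below, run via mongos:
    db.adminCommand({
        moveChunk: "db76.coll76",
        bounds: [ { _id: NumberLong(0) }, { _id: MaxKey } ], // chunk min/max from the log, in hash space
        to: "test-rs1",
        _waitForDelete: true                                 // logged as "waitForDelete: true"
    });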
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.177-0400 m30999| 2015-07-09T14:17:28.176-0400 I SHARDING [conn1] moving chunk ns: db76.coll76 moving ( ns: db76.coll76, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.177-0400 m31100| 2015-07-09T14:17:28.177-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.177-0400 m31100| 2015-07-09T14:17:28.177-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebab7ca4787b9985d1f20') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.181-0400 m31100| 2015-07-09T14:17:28.181-0400 I SHARDING [conn38] distributed lock 'db76.coll76/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebab8792e00bb67274af9 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.182-0400 m31100| 2015-07-09T14:17:28.181-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:28.181-0400-559ebab8792e00bb67274afa", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465848181), what: "moveChunk.start", ns: "db76.coll76", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.235-0400 m31100| 2015-07-09T14:17:28.235-0400 I SHARDING [conn38] remotely refreshing metadata for db76.coll76 based on current shard version 1|1||559ebab7ca4787b9985d1f20, current metadata version is 1|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.237-0400 m31100| 2015-07-09T14:17:28.237-0400 I SHARDING [conn38] metadata of collection db76.coll76 already up to date (shard version : 1|1||559ebab7ca4787b9985d1f20, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.237-0400 m31100| 2015-07-09T14:17:28.237-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.238-0400 m31100| 2015-07-09T14:17:28.237-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.238-0400 m31200| 2015-07-09T14:17:28.238-0400 I SHARDING [conn16] remotely refreshing metadata for db76.coll76, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.239-0400 m31200| 2015-07-09T14:17:28.239-0400 I SHARDING [conn16] collection db76.coll76 was previously unsharded, new metadata loaded with shard version 0|0||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.240-0400 m31200| 2015-07-09T14:17:28.239-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebab7ca4787b9985d1f20, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.240-0400 m31200| 2015-07-09T14:17:28.239-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db76.coll76 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.242-0400 m31100| 2015-07-09T14:17:28.242-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.246-0400 m31100| 2015-07-09T14:17:28.245-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.251-0400 m31100| 2015-07-09T14:17:28.251-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.261-0400 m31100| 2015-07-09T14:17:28.260-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.262-0400 m31200| 2015-07-09T14:17:28.262-0400 I INDEX [migrateThread] build index on: db76.coll76 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.262-0400 m31200| 2015-07-09T14:17:28.262-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.277-0400 m31200| 2015-07-09T14:17:28.276-0400 I INDEX [migrateThread] build index on: db76.coll76 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.277-0400 m31200| 2015-07-09T14:17:28.276-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.278-0400 m31100| 2015-07-09T14:17:28.277-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.289-0400 m31200| 2015-07-09T14:17:28.288-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.289-0400 m31200| 2015-07-09T14:17:28.289-0400 I SHARDING [migrateThread] Deleter starting delete for: db76.coll76 from { _id: 0 } -> { _id: MaxKey }, with opId: 101789 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.290-0400 m31200| 2015-07-09T14:17:28.289-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db76.coll76 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.299-0400 m31201| 2015-07-09T14:17:28.299-0400 I INDEX [repl writer worker 9] build index on: db76.coll76 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.300-0400 m31202| 2015-07-09T14:17:28.299-0400 I INDEX [repl writer worker 8] build index on: db76.coll76 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.300-0400 m31201| 2015-07-09T14:17:28.299-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.300-0400 m31202| 2015-07-09T14:17:28.299-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.306-0400 m31201| 2015-07-09T14:17:28.305-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.306-0400 m31202| 2015-07-09T14:17:28.305-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.307-0400 m31200| 2015-07-09T14:17:28.307-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.307-0400 m31200| 2015-07-09T14:17:28.307-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db76.coll76' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.311-0400 m31100| 2015-07-09T14:17:28.311-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.311-0400 m31100| 2015-07-09T14:17:28.311-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.312-0400 m31100| 2015-07-09T14:17:28.312-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.312-0400 m31100| 2015-07-09T14:17:28.312-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.319-0400 m31200| 2015-07-09T14:17:28.318-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db76.coll76' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.320-0400 m31200| 2015-07-09T14:17:28.319-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:28.319-0400-559ebab8d5a107a5b9c0db7c", server: "bs-osx108-8", clientAddr: "", 
time: new Date(1436465848319), what: "moveChunk.to", ns: "db76.coll76", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 49, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 11, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.373-0400 m31100| 2015-07-09T14:17:28.372-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.373-0400 m31100| 2015-07-09T14:17:28.373-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebab7ca4787b9985d1f20 through { _id: MinKey } -> { _id: 0 } for collection 'db76.coll76' [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.375-0400 m31100| 2015-07-09T14:17:28.374-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:28.374-0400-559ebab8792e00bb67274afb", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465848374), what: "moveChunk.commit", ns: "db76.coll76", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.429-0400 m31100| 2015-07-09T14:17:28.429-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.429-0400 m31100| 2015-07-09T14:17:28.429-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.430-0400 m31100| 2015-07-09T14:17:28.429-0400 I SHARDING [conn38] Deleter starting delete for: db76.coll76 from { _id: 0 } -> { _id: MaxKey }, with opId: 240342 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.430-0400 m31100| 2015-07-09T14:17:28.429-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db76.coll76 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.430-0400 m31100| 2015-07-09T14:17:28.429-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.430-0400 m31100| 2015-07-09T14:17:28.430-0400 I SHARDING [conn38] distributed lock 'db76.coll76/bs-osx108-8:31100:1436464536:197041335' unlocked. 
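With the migration committed, each shard next splits its remaining chunk at a quarter point of the signed 64-bit hashed-key space (±4611686018427387902, just under ±2^62), producing the four-chunk layout at collection version 2|5 that mongos loads below. The splitKeys in the logged splitChunk requests are the kind of split the sh.splitAt helper issues; a sketch, not the harness's actual code:

    // Rough equivalents of the splitChunk requests logged below:
    sh.splitAt("db76.coll76", { _id: NumberLong("-4611686018427387902") }); // splits test-rs0's MinKey -> 0 chunk
    sh.splitAt("db76.coll76", { _id: NumberLong("4611686018427387902") });  // splits test-rs1's 0 -> MaxKey chunk

Quartering the hash space up front leaves each shard with two chunks, so the balancer has units to move before any documents exist.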
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.431-0400 m31100| 2015-07-09T14:17:28.430-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:28.430-0400-559ebab8792e00bb67274afc", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465848430), what: "moveChunk.from", ns: "db76.coll76", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 59, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 117, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.483-0400 m31100| 2015-07-09T14:17:28.482-0400 I COMMAND [conn38] command db76.coll76 command: moveChunk { moveChunk: "db76.coll76", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebab7ca4787b9985d1f20') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 305ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.485-0400 m30999| 2015-07-09T14:17:28.484-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db76.coll76: 0ms sequenceNumber: 330 version: 2|1||559ebab7ca4787b9985d1f20 based on: 1|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.486-0400 m31100| 2015-07-09T14:17:28.485-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db76.coll76", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab7ca4787b9985d1f20') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.489-0400 m31100| 2015-07-09T14:17:28.489-0400 I SHARDING [conn38] distributed lock 'db76.coll76/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebab8792e00bb67274afd [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.490-0400 m31100| 2015-07-09T14:17:28.489-0400 I SHARDING [conn38] remotely refreshing metadata for db76.coll76 based on current shard version 2|0||559ebab7ca4787b9985d1f20, current metadata version is 2|0||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.491-0400 m31100| 2015-07-09T14:17:28.490-0400 I SHARDING [conn38] updating metadata for db76.coll76 from shard version 2|0||559ebab7ca4787b9985d1f20 to shard version 2|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.491-0400 m31100| 2015-07-09T14:17:28.490-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559ebab7ca4787b9985d1f20, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.491-0400 m31100| 2015-07-09T14:17:28.490-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.493-0400 m31100| 2015-07-09T14:17:28.492-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:28.492-0400-559ebab8792e00bb67274afe", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new 
Date(1436465848492), what: "split", ns: "db76.coll76", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebab7ca4787b9985d1f20') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebab7ca4787b9985d1f20') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.548-0400 m31100| 2015-07-09T14:17:28.548-0400 I SHARDING [conn38] distributed lock 'db76.coll76/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.550-0400 m30999| 2015-07-09T14:17:28.550-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db76.coll76: 0ms sequenceNumber: 331 version: 2|3||559ebab7ca4787b9985d1f20 based on: 2|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.551-0400 m31200| 2015-07-09T14:17:28.550-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db76.coll76", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab7ca4787b9985d1f20') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.554-0400 m31200| 2015-07-09T14:17:28.554-0400 I SHARDING [conn18] distributed lock 'db76.coll76/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebab8d5a107a5b9c0db7d [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.554-0400 m31200| 2015-07-09T14:17:28.554-0400 I SHARDING [conn18] remotely refreshing metadata for db76.coll76 based on current shard version 0|0||559ebab7ca4787b9985d1f20, current metadata version is 1|1||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.556-0400 m31200| 2015-07-09T14:17:28.555-0400 I SHARDING [conn18] updating metadata for db76.coll76 from shard version 0|0||559ebab7ca4787b9985d1f20 to shard version 2|0||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.556-0400 m31200| 2015-07-09T14:17:28.555-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559ebab7ca4787b9985d1f20, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.556-0400 m31200| 2015-07-09T14:17:28.556-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.557-0400 m31200| 2015-07-09T14:17:28.557-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:28.557-0400-559ebab8d5a107a5b9c0db7e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465848557), what: "split", ns: "db76.coll76", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559ebab7ca4787b9985d1f20') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebab7ca4787b9985d1f20') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.611-0400 m31200| 2015-07-09T14:17:28.610-0400 I SHARDING [conn18] distributed lock 'db76.coll76/bs-osx108-8:31200:1436464537:809424560' unlocked. 
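Below, the update_multifield_multiupdate workload's setup builds its secondary indexes: each index created through mongos is built on both shard primaries (conn175 on m31100, conn137 on m31200) and then replayed by the repl writer workers on the four secondaries, which is why every index build appears six times. A sketch of the equivalent shell calls (illustrative; field names from the log):

    // Rough equivalent of the index builds logged below, via mongos:
    var coll = db.getSiblingDB("db76").coll76;
    coll.createIndex({ x: 1 }); // logged as name: "x_1" on each replica-set member
    coll.createIndex({ y: 1 }); // name: "y_1"
    coll.createIndex({ z: 1 }); // name: "z_1"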
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.613-0400 m30999| 2015-07-09T14:17:28.612-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db76.coll76: 0ms sequenceNumber: 332 version: 2|5||559ebab7ca4787b9985d1f20 based on: 2|3||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.620-0400 m31100| 2015-07-09T14:17:28.619-0400 I INDEX [conn175] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.621-0400 m31100| 2015-07-09T14:17:28.619-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.621-0400 m31200| 2015-07-09T14:17:28.621-0400 I INDEX [conn137] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.621-0400 m31200| 2015-07-09T14:17:28.621-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.627-0400 m31100| 2015-07-09T14:17:28.627-0400 I INDEX [conn175] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.632-0400 m31200| 2015-07-09T14:17:28.631-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.638-0400 m31101| 2015-07-09T14:17:28.638-0400 I INDEX [repl writer worker 2] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.639-0400 m31101| 2015-07-09T14:17:28.638-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.643-0400 m31202| 2015-07-09T14:17:28.642-0400 I INDEX [repl writer worker 2] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.643-0400 m31202| 2015-07-09T14:17:28.642-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.643-0400 m31102| 2015-07-09T14:17:28.642-0400 I INDEX [repl writer worker 5] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.643-0400 m31102| 2015-07-09T14:17:28.642-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.648-0400 m31201| 2015-07-09T14:17:28.647-0400 I INDEX [repl writer worker 13] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.648-0400 m31201| 2015-07-09T14:17:28.647-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.648-0400 m31100| 2015-07-09T14:17:28.646-0400 I INDEX [conn175] build index on: db76.coll76 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.648-0400 m31100| 2015-07-09T14:17:28.647-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.652-0400 m31200| 2015-07-09T14:17:28.652-0400 I INDEX [conn137] build index on: db76.coll76 properties: { v: 1, key: { y: 1.0 }, name: "y_1", 
ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.652-0400 m31200| 2015-07-09T14:17:28.652-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.653-0400 m31202| 2015-07-09T14:17:28.652-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.653-0400 m31101| 2015-07-09T14:17:28.652-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.654-0400 m31102| 2015-07-09T14:17:28.653-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.659-0400 m31100| 2015-07-09T14:17:28.659-0400 I INDEX [conn175] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.660-0400 m31201| 2015-07-09T14:17:28.659-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.662-0400 m31200| 2015-07-09T14:17:28.661-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.668-0400 m31102| 2015-07-09T14:17:28.667-0400 I INDEX [repl writer worker 0] build index on: db76.coll76 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.668-0400 m31102| 2015-07-09T14:17:28.668-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.681-0400 m31200| 2015-07-09T14:17:28.680-0400 I INDEX [conn137] build index on: db76.coll76 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.681-0400 m31200| 2015-07-09T14:17:28.680-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.682-0400 m31100| 2015-07-09T14:17:28.680-0400 I INDEX [conn175] build index on: db76.coll76 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.682-0400 m31100| 2015-07-09T14:17:28.680-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.686-0400 m31202| 2015-07-09T14:17:28.686-0400 I INDEX [repl writer worker 10] build index on: db76.coll76 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.687-0400 m31202| 2015-07-09T14:17:28.686-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.688-0400 m31101| 2015-07-09T14:17:28.686-0400 I INDEX [repl writer worker 0] build index on: db76.coll76 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.688-0400 m31101| 2015-07-09T14:17:28.686-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.688-0400 m31201| 2015-07-09T14:17:28.688-0400 I INDEX [repl writer worker 7] build index on: db76.coll76 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.689-0400 m31201| 2015-07-09T14:17:28.688-0400 I 
INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.692-0400 m31100| 2015-07-09T14:17:28.690-0400 I INDEX [conn175] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.692-0400 m31102| 2015-07-09T14:17:28.690-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.697-0400 m31200| 2015-07-09T14:17:28.696-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.697-0400 m31202| 2015-07-09T14:17:28.696-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.703-0400 m31101| 2015-07-09T14:17:28.703-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.709-0400 m31102| 2015-07-09T14:17:28.708-0400 I INDEX [repl writer worker 9] build index on: db76.coll76 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.972-0400 m31102| 2015-07-09T14:17:28.708-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.972-0400 m31202| 2015-07-09T14:17:28.710-0400 I INDEX [repl writer worker 7] build index on: db76.coll76 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.972-0400 m31202| 2015-07-09T14:17:28.711-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.972-0400 m31201| 2015-07-09T14:17:28.713-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31100| 2015-07-09T14:17:28.713-0400 I INDEX [conn175] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31100| 2015-07-09T14:17:28.713-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31102| 2015-07-09T14:17:28.715-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31101| 2015-07-09T14:17:28.715-0400 I INDEX [repl writer worker 12] build index on: db76.coll76 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31200| 2015-07-09T14:17:28.715-0400 I INDEX [conn137] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31200| 2015-07-09T14:17:28.716-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31101| 2015-07-09T14:17:28.716-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.973-0400 m31202| 2015-07-09T14:17:28.719-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 m31100| 2015-07-09T14:17:28.729-0400 I INDEX [conn175] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 m31200| 2015-07-09T14:17:28.729-0400 I INDEX [conn137] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 m31101| 2015-07-09T14:17:28.731-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 m31201| 2015-07-09T14:17:28.733-0400 I INDEX [repl writer worker 12] build index on: db76.coll76 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 m31201| 2015-07-09T14:17:28.733-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 m31202| 2015-07-09T14:17:28.737-0400 I INDEX [repl writer worker 13] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 m31202| 2015-07-09T14:17:28.738-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.974-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.975-0400 m31102| 2015-07-09T14:17:28.742-0400 I INDEX [repl writer worker 14] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.975-0400 m31102| 2015-07-09T14:17:28.742-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.975-0400 m31202| 2015-07-09T14:17:28.829-0400 I INDEX [repl writer worker 13] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.975-0400 m31201| 2015-07-09T14:17:28.836-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.975-0400 m31101| 2015-07-09T14:17:28.830-0400 I INDEX [repl writer worker 6] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.975-0400 m31101| 2015-07-09T14:17:28.830-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.975-0400 m30999| 2015-07-09T14:17:28.846-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64100 #482 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.976-0400 m31102| 2015-07-09T14:17:28.861-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.976-0400 m31101| 2015-07-09T14:17:28.862-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.976-0400 m30998| 2015-07-09T14:17:28.861-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64101 #482 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.976-0400 m30999| 2015-07-09T14:17:28.864-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64102 #483 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.976-0400 m31201| 2015-07-09T14:17:28.869-0400 I INDEX [repl writer worker 15] build index on: db76.coll76 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db76.coll76" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.976-0400 m31201| 2015-07-09T14:17:28.869-0400 I INDEX [repl writer worker 15] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.977-0400 m31201| 2015-07-09T14:17:28.874-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.977-0400 m30999| 2015-07-09T14:17:28.894-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64103 #484 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.977-0400 m30998| 2015-07-09T14:17:28.895-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64104 #483 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.977-0400 m30998| 2015-07-09T14:17:28.895-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64105 #484 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.977-0400 m30999| 2015-07-09T14:17:28.896-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64106 #485 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.977-0400 m30998| 2015-07-09T14:17:28.901-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64108 #485 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.977-0400 m30999| 2015-07-09T14:17:28.901-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64107 #486 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 m30998| 2015-07-09T14:17:28.907-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64109 #486 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 503957704640 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 6872523673810 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 17145844176 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 2082874546758 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 126167903654 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 6505439509637 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 6195045285858 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random seed: 6921889390796 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 m30998| 2015-07-09T14:17:28.923-0400 I SHARDING [conn482] ChunkManager: time to load chunks for db76.coll76: 0ms sequenceNumber: 94 version: 2|5||559ebab7ca4787b9985d1f20 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.978-0400 setting random 
seed: 8170203203335 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:28.979-0400 setting random seed: 6605302398093 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.050-0400 m30998| 2015-07-09T14:17:29.049-0400 I NETWORK [conn486] end connection 127.0.0.1:64109 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.074-0400 m31200| 2015-07-09T14:17:29.073-0400 I WRITE [conn59] update db76.coll76 update: { $unset: { x: 1.0 }, $push: { y: 3.0 }, $inc: { z: 4.0 } } nscanned:0 nscannedObjects:3 nMatched:3 nModified:3 keyUpdates:5 writeConflicts:24 numYields:24 locks:{ Global: { acquireCount: { r: 28, w: 28 } }, Database: { acquireCount: { w: 28 } }, Collection: { acquireCount: { w: 25 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.074-0400 m31200| 2015-07-09T14:17:29.073-0400 I COMMAND [conn59] command db76.$cmd command: update { update: "coll76", updates: [ { q: {}, u: { $unset: { x: 1.0 }, $push: { y: 3.0 }, $inc: { z: 4.0 } }, multi: true, upsert: false } ], ordered: true, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 0|0, ObjectId('00000000ffffffffffffffff') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:140 locks:{ Global: { acquireCount: { r: 28, w: 28 } }, Database: { acquireCount: { w: 28 } }, Collection: { acquireCount: { w: 25 } }, Metadata: { acquireCount: { w: 3 } }, oplog: { acquireCount: { w: 3 } } } protocol:op_command 104ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.087-0400 m30998| 2015-07-09T14:17:29.087-0400 I NETWORK [conn482] end connection 127.0.0.1:64101 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.107-0400 m30999| 2015-07-09T14:17:29.103-0400 I NETWORK [conn482] end connection 127.0.0.1:64100 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.125-0400 m30999| 2015-07-09T14:17:29.124-0400 I NETWORK [conn484] end connection 127.0.0.1:64103 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.129-0400 m30998| 2015-07-09T14:17:29.129-0400 I NETWORK [conn483] end connection 127.0.0.1:64104 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.136-0400 m30999| 2015-07-09T14:17:29.133-0400 I NETWORK [conn485] end connection 127.0.0.1:64106 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.139-0400 m30999| 2015-07-09T14:17:29.139-0400 I NETWORK [conn483] end connection 127.0.0.1:64102 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.170-0400 m30998| 2015-07-09T14:17:29.170-0400 I NETWORK [conn485] end connection 127.0.0.1:64108 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.172-0400 m30998| 2015-07-09T14:17:29.172-0400 I NETWORK [conn484] end connection 127.0.0.1:64105 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.200-0400 m30999| 2015-07-09T14:17:29.200-0400 I NETWORK [conn486] end connection 127.0.0.1:64107 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.218-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.218-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.219-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.219-0400 jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js: Workload completed in 479 ms 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.219-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.219-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.219-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.219-0400 m30999| 2015-07-09T14:17:29.219-0400 I COMMAND [conn1] DROP: db76.coll76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.219-0400 m30999| 2015-07-09T14:17:29.219-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:29.219-0400-559ebab9ca4787b9985d1f22", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465849219), what: "dropCollection.start", ns: "db76.coll76", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.234-0400 m30999| 2015-07-09T14:17:29.234-0400 I SHARDING [conn1] distributed lock 'db76.coll76/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab9ca4787b9985d1f23 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.235-0400 m31100| 2015-07-09T14:17:29.235-0400 I COMMAND [conn38] CMD: drop db76.coll76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.241-0400 m31200| 2015-07-09T14:17:29.241-0400 I COMMAND [conn18] CMD: drop db76.coll76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.242-0400 m31102| 2015-07-09T14:17:29.241-0400 I COMMAND [repl writer worker 4] CMD: drop db76.coll76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.242-0400 m31101| 2015-07-09T14:17:29.242-0400 I COMMAND [repl writer worker 9] CMD: drop db76.coll76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.245-0400 m31202| 2015-07-09T14:17:29.245-0400 I COMMAND [repl writer worker 11] CMD: drop db76.coll76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.245-0400 m31201| 2015-07-09T14:17:29.245-0400 I COMMAND [repl writer worker 9] CMD: drop db76.coll76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.297-0400 m31100| 2015-07-09T14:17:29.296-0400 I SHARDING [conn38] remotely refreshing metadata for db76.coll76 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebab7ca4787b9985d1f20, current metadata version is 2|3||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.298-0400 m31100| 2015-07-09T14:17:29.298-0400 W SHARDING [conn38] no chunks found when reloading db76.coll76, previous version was 0|0||559ebab7ca4787b9985d1f20, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.299-0400 m31100| 2015-07-09T14:17:29.298-0400 I SHARDING [conn38] dropping metadata for db76.coll76 at shard version 2|3||559ebab7ca4787b9985d1f20, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.300-0400 m31200| 2015-07-09T14:17:29.299-0400 I SHARDING [conn18] remotely refreshing metadata for db76.coll76 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebab7ca4787b9985d1f20, current metadata version is 2|5||559ebab7ca4787b9985d1f20 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.301-0400 m31200| 2015-07-09T14:17:29.301-0400 W SHARDING [conn18] no chunks found when reloading db76.coll76, previous version was 0|0||559ebab7ca4787b9985d1f20, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.301-0400 m31200| 2015-07-09T14:17:29.301-0400 I SHARDING [conn18] dropping metadata for db76.coll76 at shard version 2|5||559ebab7ca4787b9985d1f20, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.302-0400 m30999| 
2015-07-09T14:17:29.302-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:29.302-0400-559ebab9ca4787b9985d1f24", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465849302), what: "dropCollection", ns: "db76.coll76", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.357-0400 m30999| 2015-07-09T14:17:29.356-0400 I SHARDING [conn1] distributed lock 'db76.coll76/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.412-0400 m30999| 2015-07-09T14:17:29.412-0400 I COMMAND [conn1] DROP DATABASE: db76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.413-0400 m30999| 2015-07-09T14:17:29.412-0400 I SHARDING [conn1] DBConfig::dropDatabase: db76 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.413-0400 m30999| 2015-07-09T14:17:29.412-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:29.412-0400-559ebab9ca4787b9985d1f25", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465849412), what: "dropDatabase.start", ns: "db76", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.520-0400 m30999| 2015-07-09T14:17:29.519-0400 I SHARDING [conn1] DBConfig::dropDatabase: db76 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.520-0400 m31100| 2015-07-09T14:17:29.520-0400 I COMMAND [conn160] dropDatabase db76 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.520-0400 m31100| 2015-07-09T14:17:29.520-0400 I COMMAND [conn160] dropDatabase db76 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.521-0400 m30999| 2015-07-09T14:17:29.520-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:29.520-0400-559ebab9ca4787b9985d1f26", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465849520), what: "dropDatabase", ns: "db76", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.521-0400 m31102| 2015-07-09T14:17:29.521-0400 I COMMAND [repl writer worker 7] dropDatabase db76 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.521-0400 m31101| 2015-07-09T14:17:29.521-0400 I COMMAND [repl writer worker 1] dropDatabase db76 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.521-0400 m31102| 2015-07-09T14:17:29.521-0400 I COMMAND [repl writer worker 7] dropDatabase db76 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.522-0400 m31101| 2015-07-09T14:17:29.521-0400 I COMMAND [repl writer worker 1] dropDatabase db76 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.612-0400 m31100| 2015-07-09T14:17:29.612-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.616-0400 m31102| 2015-07-09T14:17:29.616-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.616-0400 m31101| 2015-07-09T14:17:29.616-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.648-0400 m31200| 2015-07-09T14:17:29.648-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.651-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.651-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.651-0400 ---- [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:29.651-0400 jstests/concurrency/fsm_workloads/yield_text.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.651-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.651-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.652-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.652-0400 m31201| 2015-07-09T14:17:29.651-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.652-0400 m31202| 2015-07-09T14:17:29.651-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.659-0400 m30999| 2015-07-09T14:17:29.659-0400 I SHARDING [conn1] distributed lock 'db77/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab9ca4787b9985d1f27 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.663-0400 m30999| 2015-07-09T14:17:29.663-0400 I SHARDING [conn1] Placing [db77] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.663-0400 m30999| 2015-07-09T14:17:29.663-0400 I SHARDING [conn1] Enabling sharding for database [db77] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.717-0400 m30999| 2015-07-09T14:17:29.717-0400 I SHARDING [conn1] distributed lock 'db77/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.741-0400 m31100| 2015-07-09T14:17:29.740-0400 I INDEX [conn145] build index on: db77.coll77 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db77.coll77" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.741-0400 m31100| 2015-07-09T14:17:29.740-0400 I INDEX [conn145] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.747-0400 m31100| 2015-07-09T14:17:29.747-0400 I INDEX [conn145] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.748-0400 m30999| 2015-07-09T14:17:29.748-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db77.coll77", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.753-0400 m30999| 2015-07-09T14:17:29.752-0400 I SHARDING [conn1] distributed lock 'db77.coll77/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebab9ca4787b9985d1f28 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.754-0400 m30999| 2015-07-09T14:17:29.754-0400 I SHARDING [conn1] enable sharding on: db77.coll77 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.755-0400 m30999| 2015-07-09T14:17:29.754-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:29.754-0400-559ebab9ca4787b9985d1f29", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465849754), what: "shardCollection.start", ns: "db77.coll77", details: { shardKey: { _id: "hashed" }, collection: "db77.coll77", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.755-0400 m31101| 2015-07-09T14:17:29.755-0400 I INDEX [repl writer worker 10] build index on: db77.coll77 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db77.coll77" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.756-0400 m31101| 2015-07-09T14:17:29.755-0400 I INDEX [repl writer worker 10] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.760-0400 m31101| 2015-07-09T14:17:29.759-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.764-0400 m31102| 2015-07-09T14:17:29.764-0400 I INDEX [repl writer worker 3] build index on: db77.coll77 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db77.coll77" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.765-0400 m31102| 2015-07-09T14:17:29.764-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.769-0400 m31102| 2015-07-09T14:17:29.769-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.809-0400 m30999| 2015-07-09T14:17:29.809-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db77.coll77 using new epoch 559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.870-0400 m30999| 2015-07-09T14:17:29.870-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db77.coll77: 0ms sequenceNumber: 333 version: 1|1||559ebab9ca4787b9985d1f2a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.926-0400 m30999| 2015-07-09T14:17:29.925-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db77.coll77: 0ms sequenceNumber: 334 version: 1|1||559ebab9ca4787b9985d1f2a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.927-0400 m31100| 2015-07-09T14:17:29.927-0400 I SHARDING [conn175] remotely refreshing metadata for db77.coll77 with requested shard version 1|1||559ebab9ca4787b9985d1f2a, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.929-0400 m31100| 2015-07-09T14:17:29.929-0400 I SHARDING [conn175] collection db77.coll77 was previously unsharded, new metadata loaded with shard version 1|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.929-0400 m31100| 2015-07-09T14:17:29.929-0400 I SHARDING [conn175] collection version was loaded at version 1|1||559ebab9ca4787b9985d1f2a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.930-0400 m30999| 2015-07-09T14:17:29.929-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:29.929-0400-559ebab9ca4787b9985d1f2b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465849929), what: "shardCollection", ns: "db77.coll77", details: { version: "1|1||559ebab9ca4787b9985d1f2a" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.984-0400 m30999| 2015-07-09T14:17:29.984-0400 I SHARDING [conn1] distributed lock 'db77.coll77/bs-osx108-8:30999:1436464534:16807' unlocked. 
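[annotation] The chunk-version strings throughout these entries (for example 2|1||559ebab9ca4787b9985d1f2a) read as majorVersion|minorVersion||epoch: a committed moveChunk bumps the major version (1|1 to 2|0 and 2|1 in the db76 passage above), a split bumps only the minor version (2|1 to 2|3 to 2|5), and the epoch is the ObjectId minted when the collection is sharded ("using new epoch 559ebab9ca4787b9985d1f2a" above), tying versions to one incarnation of the collection. This bookkeeping lives in the config database; a small shell sketch for inspecting it against the mongos on port 30999, assuming the 3.1-era config schema shown in these entries, where each chunk and changelog document carries an ns field:

    // Follow the metadata that the SHARDING entries above are updating.
    var conf = db.getSiblingDB("config");

    // One document per chunk: bounds, owning shard, and the lastmod /
    // lastmodEpoch pair that the log renders as "2|1||559ebab9...".
    conf.chunks.find({ ns: "db77.coll77" }).sort({ min: 1 }).forEach(printjson);

    // The "about to log metadata event" documents are written to
    // config.changelog (what: "moveChunk.start", "split", "dropCollection", ...).
    conf.changelog.find({ ns: "db77.coll77" }).sort({ time: 1 }).forEach(printjson);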
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.985-0400 m30999| 2015-07-09T14:17:29.985-0400 I SHARDING [conn1] moving chunk ns: db77.coll77 moving ( ns: db77.coll77, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.985-0400 m31100| 2015-07-09T14:17:29.985-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.987-0400 m31100| 2015-07-09T14:17:29.986-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebab9ca4787b9985d1f2a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.990-0400 m31100| 2015-07-09T14:17:29.990-0400 I SHARDING [conn38] distributed lock 'db77.coll77/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebab9792e00bb67274b00 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:29.990-0400 m31100| 2015-07-09T14:17:29.990-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:29.990-0400-559ebab9792e00bb67274b01", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465849990), what: "moveChunk.start", ns: "db77.coll77", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.044-0400 m31100| 2015-07-09T14:17:30.043-0400 I SHARDING [conn38] remotely refreshing metadata for db77.coll77 based on current shard version 1|1||559ebab9ca4787b9985d1f2a, current metadata version is 1|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.045-0400 m31100| 2015-07-09T14:17:30.045-0400 I SHARDING [conn38] metadata of collection db77.coll77 already up to date (shard version : 1|1||559ebab9ca4787b9985d1f2a, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.045-0400 m31100| 2015-07-09T14:17:30.045-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.046-0400 m31100| 2015-07-09T14:17:30.045-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.046-0400 m31200| 2015-07-09T14:17:30.046-0400 I SHARDING [conn16] remotely refreshing metadata for db77.coll77, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.048-0400 m31200| 2015-07-09T14:17:30.047-0400 I SHARDING [conn16] collection db77.coll77 was previously unsharded, new metadata loaded with shard version 0|0||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.048-0400 m31200| 2015-07-09T14:17:30.047-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebab9ca4787b9985d1f2a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.048-0400 m31200| 2015-07-09T14:17:30.048-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db77.coll77 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.050-0400 m31100| 2015-07-09T14:17:30.050-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.054-0400 m31100| 2015-07-09T14:17:30.053-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.059-0400 m31100| 2015-07-09T14:17:30.059-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.063-0400 m31200| 2015-07-09T14:17:30.062-0400 I INDEX [migrateThread] build index on: db77.coll77 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db77.coll77" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.063-0400 m31200| 2015-07-09T14:17:30.062-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.069-0400 m31100| 2015-07-09T14:17:30.068-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.074-0400 m31200| 2015-07-09T14:17:30.073-0400 I INDEX [migrateThread] build index on: db77.coll77 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db77.coll77" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.074-0400 m31200| 2015-07-09T14:17:30.074-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.086-0400 m31100| 2015-07-09T14:17:30.086-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.092-0400 m31200| 2015-07-09T14:17:30.091-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.092-0400 m31200| 2015-07-09T14:17:30.092-0400 I SHARDING [migrateThread] Deleter starting delete for: db77.coll77 from { _id: 0 } -> { _id: MaxKey }, with opId: 102259 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.092-0400 m31200| 2015-07-09T14:17:30.092-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db77.coll77 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.097-0400 m31201| 2015-07-09T14:17:30.096-0400 I INDEX [repl writer worker 8] build index on: db77.coll77 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db77.coll77" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.098-0400 m31201| 2015-07-09T14:17:30.096-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.099-0400 m31202| 2015-07-09T14:17:30.099-0400 I INDEX [repl writer worker 2] build index on: db77.coll77 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db77.coll77" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.099-0400 m31202| 2015-07-09T14:17:30.099-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.105-0400 m31201| 2015-07-09T14:17:30.105-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.107-0400 m31200| 2015-07-09T14:17:30.106-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.107-0400 m31200| 2015-07-09T14:17:30.107-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db77.coll77' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.108-0400 m31202| 2015-07-09T14:17:30.107-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.120-0400 m31100| 2015-07-09T14:17:30.119-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.120-0400 m31100| 2015-07-09T14:17:30.120-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.121-0400 m31100| 2015-07-09T14:17:30.120-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.121-0400 m31100| 2015-07-09T14:17:30.121-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.130-0400 m31200| 2015-07-09T14:17:30.130-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db77.coll77' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.131-0400 m31200| 2015-07-09T14:17:30.130-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:30.130-0400-559ebabad5a107a5b9c0db7f", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465850130), what: "moveChunk.to", ns: "db77.coll77", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 43, step 2 of 5: 13, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.184-0400 m31100| 2015-07-09T14:17:30.183-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.184-0400 m31100| 2015-07-09T14:17:30.183-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebab9ca4787b9985d1f2a through { _id: MinKey } -> { _id: 0 } for collection 'db77.coll77' [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.186-0400 m31100| 2015-07-09T14:17:30.185-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:30.185-0400-559ebaba792e00bb67274b02", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465850185), what: "moveChunk.commit", ns: "db77.coll77", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.239-0400 m31100| 2015-07-09T14:17:30.239-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.240-0400 m31100| 2015-07-09T14:17:30.239-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.240-0400 m31100| 2015-07-09T14:17:30.239-0400 I SHARDING [conn38] Deleter starting delete for: db77.coll77 from { _id: 0 } -> { _id: MaxKey }, with opId: 240622 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:30.240-0400 m31100| 2015-07-09T14:17:30.239-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db77.coll77 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.241-0400 m31100| 2015-07-09T14:17:30.239-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.241-0400 m31100| 2015-07-09T14:17:30.240-0400 I SHARDING [conn38] distributed lock 'db77.coll77/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.241-0400 m31100| 2015-07-09T14:17:30.240-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:30.240-0400-559ebaba792e00bb67274b03", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465850240), what: "moveChunk.from", ns: "db77.coll77", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 58, step 3 of 6: 2, step 4 of 6: 71, step 5 of 6: 119, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.295-0400 m31100| 2015-07-09T14:17:30.294-0400 I COMMAND [conn38] command db77.coll77 command: moveChunk { moveChunk: "db77.coll77", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebab9ca4787b9985d1f2a') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 309ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.297-0400 m30999| 2015-07-09T14:17:30.297-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db77.coll77: 0ms sequenceNumber: 335 version: 2|1||559ebab9ca4787b9985d1f2a based on: 1|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.299-0400 m31100| 2015-07-09T14:17:30.298-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db77.coll77", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab9ca4787b9985d1f2a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.303-0400 m31100| 2015-07-09T14:17:30.302-0400 I SHARDING [conn38] distributed lock 'db77.coll77/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebaba792e00bb67274b04 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.303-0400 m31100| 2015-07-09T14:17:30.303-0400 I SHARDING [conn38] remotely refreshing metadata for db77.coll77 based on current shard version 2|0||559ebab9ca4787b9985d1f2a, current metadata version is 2|0||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.305-0400 m31100| 2015-07-09T14:17:30.304-0400 I SHARDING [conn38] updating metadata for db77.coll77 from shard version 2|0||559ebab9ca4787b9985d1f2a to shard version 2|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.305-0400 m31100| 2015-07-09T14:17:30.304-0400 I 
SHARDING [conn38] collection version was loaded at version 2|1||559ebab9ca4787b9985d1f2a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.305-0400 m31100| 2015-07-09T14:17:30.304-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.308-0400 m31100| 2015-07-09T14:17:30.307-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:30.307-0400-559ebaba792e00bb67274b05", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465850307), what: "split", ns: "db77.coll77", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebab9ca4787b9985d1f2a') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebab9ca4787b9985d1f2a') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.361-0400 m31100| 2015-07-09T14:17:30.361-0400 I SHARDING [conn38] distributed lock 'db77.coll77/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.364-0400 m30999| 2015-07-09T14:17:30.363-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db77.coll77: 0ms sequenceNumber: 336 version: 2|3||559ebab9ca4787b9985d1f2a based on: 2|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.364-0400 m31200| 2015-07-09T14:17:30.364-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db77.coll77", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebab9ca4787b9985d1f2a') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.368-0400 m31200| 2015-07-09T14:17:30.368-0400 I SHARDING [conn18] distributed lock 'db77.coll77/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebabad5a107a5b9c0db80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.368-0400 m31200| 2015-07-09T14:17:30.368-0400 I SHARDING [conn18] remotely refreshing metadata for db77.coll77 based on current shard version 0|0||559ebab9ca4787b9985d1f2a, current metadata version is 1|1||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.370-0400 m31200| 2015-07-09T14:17:30.369-0400 I SHARDING [conn18] updating metadata for db77.coll77 from shard version 0|0||559ebab9ca4787b9985d1f2a to shard version 2|0||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.370-0400 m31200| 2015-07-09T14:17:30.369-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559ebab9ca4787b9985d1f2a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.370-0400 m31200| 2015-07-09T14:17:30.369-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.371-0400 m31200| 2015-07-09T14:17:30.370-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:30.370-0400-559ebabad5a107a5b9c0db81", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465850370), what: "split", ns: "db77.coll77", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559ebab9ca4787b9985d1f2a') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebab9ca4787b9985d1f2a') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.425-0400 m31200| 2015-07-09T14:17:30.425-0400 I SHARDING [conn18] distributed lock 'db77.coll77/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.428-0400 m30999| 2015-07-09T14:17:30.427-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db77.coll77: 0ms sequenceNumber: 337 version: 2|5||559ebab9ca4787b9985d1f2a based on: 2|3||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.498-0400 m31100| 2015-07-09T14:17:30.497-0400 I INDEX [conn175] build index on: db77.coll77 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "yield_text_text", ns: "db77.coll77", weights: { yield_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.498-0400 m31200| 2015-07-09T14:17:30.497-0400 I INDEX [conn137] build index on: db77.coll77 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "yield_text_text", ns: "db77.coll77", weights: { yield_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.499-0400 m31100| 2015-07-09T14:17:30.497-0400 I INDEX [conn175] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.499-0400 m31200| 2015-07-09T14:17:30.497-0400 I INDEX [conn137] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.506-0400 m31200| 2015-07-09T14:17:30.506-0400 I INDEX [conn137] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.508-0400 m31100| 2015-07-09T14:17:30.507-0400 I INDEX [conn175] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.509-0400 Using 5 threads (requested 5) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.525-0400 m31201| 2015-07-09T14:17:30.524-0400 I INDEX [repl writer worker 2] build index on: db77.coll77 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "yield_text_text", ns: "db77.coll77", weights: { yield_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.525-0400 m31201| 2015-07-09T14:17:30.524-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.539-0400 m31101| 2015-07-09T14:17:30.538-0400 I INDEX [repl writer worker 13] build index on: db77.coll77 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "yield_text_text", ns: "db77.coll77", weights: { yield_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.539-0400 m31101| 2015-07-09T14:17:30.538-0400 I INDEX [repl writer worker 13] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.564-0400 m31201| 2015-07-09T14:17:30.561-0400 I INDEX [repl writer worker 2] build index done. scanned 109 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.565-0400 m31202| 2015-07-09T14:17:30.560-0400 I INDEX [repl writer worker 9] build index on: db77.coll77 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "yield_text_text", ns: "db77.coll77", weights: { yield_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.565-0400 m31202| 2015-07-09T14:17:30.561-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.569-0400 m30999| 2015-07-09T14:17:30.567-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64110 #487 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.569-0400 m30999| 2015-07-09T14:17:30.568-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64111 #488 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.570-0400 m30998| 2015-07-09T14:17:30.569-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64112 #487 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.570-0400 m31102| 2015-07-09T14:17:30.569-0400 I INDEX [repl writer worker 7] build index on: db77.coll77 properties: { v: 1, key: { _fts: "text", _ftsx: 1 }, name: "yield_text_text", ns: "db77.coll77", weights: { yield_text: 1 }, default_language: "english", language_override: "language", textIndexVersion: 2 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.570-0400 m31102| 2015-07-09T14:17:30.569-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.576-0400 m31101| 2015-07-09T14:17:30.576-0400 I INDEX [repl writer worker 13] build index done. scanned 91 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.579-0400 m30999| 2015-07-09T14:17:30.577-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64113 #489 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.579-0400 m30998| 2015-07-09T14:17:30.579-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64114 #488 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.580-0400 m31202| 2015-07-09T14:17:30.580-0400 I INDEX [repl writer worker 9] build index done. scanned 109 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.581-0400 m31102| 2015-07-09T14:17:30.579-0400 I INDEX [repl writer worker 7] build index done. scanned 91 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.589-0400 setting random seed: 5317456442862 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.589-0400 setting random seed: 2527756188064 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.589-0400 setting random seed: 490476810373 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.590-0400 setting random seed: 3443755782209 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.590-0400 setting random seed: 9203458540141 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:30.593-0400 m30998| 2015-07-09T14:17:30.592-0400 I SHARDING [conn488] ChunkManager: time to load chunks for db77.coll77: 0ms sequenceNumber: 95 version: 2|5||559ebab9ca4787b9985d1f2a based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:31.740-0400 m30998| 2015-07-09T14:17:31.740-0400 I NETWORK [conn488] end connection 127.0.0.1:64114 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:32.106-0400 m30998| 2015-07-09T14:17:32.106-0400 I NETWORK [conn487] end connection 127.0.0.1:64112 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:32.216-0400 m30999| 2015-07-09T14:17:32.216-0400 I NETWORK [conn487] end connection 127.0.0.1:64110 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:32.509-0400 m30999| 2015-07-09T14:17:32.509-0400 I NETWORK [conn488] end connection 127.0.0.1:64111 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.364-0400 m30999| 2015-07-09T14:17:33.364-0400 I NETWORK [conn489] end connection 127.0.0.1:64113 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.376-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.376-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.377-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.377-0400 jstests/concurrency/fsm_workloads/yield_text.js: Workload completed in 2856 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.377-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.377-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.377-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.377-0400 m30999| 2015-07-09T14:17:33.377-0400 I COMMAND [conn1] DROP: db77.coll77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.377-0400 m30999| 2015-07-09T14:17:33.377-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:33.377-0400-559ebabdca4787b9985d1f2c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465853377), what: "dropCollection.start", ns: "db77.coll77", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.434-0400 m30999| 2015-07-09T14:17:33.434-0400 I SHARDING [conn1] distributed lock 'db77.coll77/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebabdca4787b9985d1f2d [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.435-0400 m31100| 2015-07-09T14:17:33.435-0400 I COMMAND [conn38] CMD: drop db77.coll77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.437-0400 m31200| 2015-07-09T14:17:33.437-0400 I COMMAND [conn18] CMD: drop db77.coll77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.439-0400 m31102| 2015-07-09T14:17:33.439-0400 I COMMAND [repl writer worker 13] CMD: drop db77.coll77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.440-0400 m31101| 2015-07-09T14:17:33.439-0400 I COMMAND [repl writer 
worker 7] CMD: drop db77.coll77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.441-0400 m31201| 2015-07-09T14:17:33.441-0400 I COMMAND [repl writer worker 13] CMD: drop db77.coll77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.442-0400 m31202| 2015-07-09T14:17:33.441-0400 I COMMAND [repl writer worker 13] CMD: drop db77.coll77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.494-0400 m31100| 2015-07-09T14:17:33.493-0400 I SHARDING [conn38] remotely refreshing metadata for db77.coll77 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebab9ca4787b9985d1f2a, current metadata version is 2|3||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.495-0400 m31100| 2015-07-09T14:17:33.495-0400 W SHARDING [conn38] no chunks found when reloading db77.coll77, previous version was 0|0||559ebab9ca4787b9985d1f2a, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.495-0400 m31100| 2015-07-09T14:17:33.495-0400 I SHARDING [conn38] dropping metadata for db77.coll77 at shard version 2|3||559ebab9ca4787b9985d1f2a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.496-0400 m31200| 2015-07-09T14:17:33.496-0400 I SHARDING [conn18] remotely refreshing metadata for db77.coll77 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebab9ca4787b9985d1f2a, current metadata version is 2|5||559ebab9ca4787b9985d1f2a [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.498-0400 m31200| 2015-07-09T14:17:33.497-0400 W SHARDING [conn18] no chunks found when reloading db77.coll77, previous version was 0|0||559ebab9ca4787b9985d1f2a, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.498-0400 m31200| 2015-07-09T14:17:33.497-0400 I SHARDING [conn18] dropping metadata for db77.coll77 at shard version 2|5||559ebab9ca4787b9985d1f2a, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.499-0400 m30999| 2015-07-09T14:17:33.498-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:33.498-0400-559ebabdca4787b9985d1f2e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465853498), what: "dropCollection", ns: "db77.coll77", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.553-0400 m30999| 2015-07-09T14:17:33.552-0400 I SHARDING [conn1] distributed lock 'db77.coll77/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.608-0400 m30999| 2015-07-09T14:17:33.608-0400 I COMMAND [conn1] DROP DATABASE: db77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.609-0400 m30999| 2015-07-09T14:17:33.608-0400 I SHARDING [conn1] DBConfig::dropDatabase: db77 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.609-0400 m30999| 2015-07-09T14:17:33.608-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:33.608-0400-559ebabdca4787b9985d1f2f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465853608), what: "dropDatabase.start", ns: "db77", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.715-0400 m30999| 2015-07-09T14:17:33.715-0400 I SHARDING [conn1] DBConfig::dropDatabase: db77 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.716-0400 m31100| 2015-07-09T14:17:33.716-0400 I COMMAND [conn160] dropDatabase db77 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.716-0400 m31100| 2015-07-09T14:17:33.716-0400 I COMMAND [conn160] dropDatabase db77 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.717-0400 m30999| 2015-07-09T14:17:33.716-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:33.716-0400-559ebabdca4787b9985d1f30", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465853716), what: "dropDatabase", ns: "db77", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.717-0400 m31101| 2015-07-09T14:17:33.717-0400 I COMMAND [repl writer worker 14] dropDatabase db77 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.717-0400 m31101| 2015-07-09T14:17:33.717-0400 I COMMAND [repl writer worker 14] dropDatabase db77 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.717-0400 m31102| 2015-07-09T14:17:33.717-0400 I COMMAND [repl writer worker 14] dropDatabase db77 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.718-0400 m31102| 2015-07-09T14:17:33.717-0400 I COMMAND [repl writer worker 14] dropDatabase db77 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.806-0400 m31100| 2015-07-09T14:17:33.806-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.810-0400 m31102| 2015-07-09T14:17:33.809-0400 I COMMAND [repl writer worker 10] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.810-0400 m31101| 2015-07-09T14:17:33.809-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.851-0400 m31200| 2015-07-09T14:17:33.850-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 jstests/concurrency/fsm_workloads/update_multifield_noindex.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.854-0400 m31201| 2015-07-09T14:17:33.854-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:33.854-0400 m31202| 2015-07-09T14:17:33.854-0400 I COMMAND [repl writer worker 4] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.862-0400 m30999| 2015-07-09T14:17:33.862-0400 I SHARDING [conn1] distributed lock 'db78/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebabdca4787b9985d1f31 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.866-0400 m30999| 2015-07-09T14:17:33.865-0400 I SHARDING [conn1] Placing [db78] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.866-0400 m30999| 2015-07-09T14:17:33.866-0400 I SHARDING [conn1] Enabling sharding for database [db78] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.919-0400 m30999| 2015-07-09T14:17:33.918-0400 I SHARDING [conn1] distributed lock 'db78/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.941-0400 m31100| 2015-07-09T14:17:33.939-0400 I INDEX [conn145] build index on: db78.coll78 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.941-0400 m31100| 2015-07-09T14:17:33.939-0400 I INDEX [conn145] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.943-0400 m31100| 2015-07-09T14:17:33.943-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.944-0400 m30999| 2015-07-09T14:17:33.944-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db78.coll78", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.948-0400 m30999| 2015-07-09T14:17:33.948-0400 I SHARDING [conn1] distributed lock 'db78.coll78/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebabdca4787b9985d1f32 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.949-0400 m30999| 2015-07-09T14:17:33.949-0400 I SHARDING [conn1] enable sharding on: db78.coll78 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.950-0400 m30999| 2015-07-09T14:17:33.949-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:33.949-0400-559ebabdca4787b9985d1f33", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465853949), what: "shardCollection.start", ns: "db78.coll78", details: { shardKey: { _id: "hashed" }, collection: "db78.coll78", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.962-0400 m31102| 2015-07-09T14:17:33.961-0400 I INDEX [repl writer worker 0] build index on: db78.coll78 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.962-0400 m31102| 2015-07-09T14:17:33.961-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.969-0400 m31102| 2015-07-09T14:17:33.968-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.971-0400 m31101| 2015-07-09T14:17:33.971-0400 I INDEX [repl writer worker 2] build index on: db78.coll78 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.971-0400 m31101| 2015-07-09T14:17:33.971-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:33.976-0400 m31101| 2015-07-09T14:17:33.976-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.002-0400 m30999| 2015-07-09T14:17:34.001-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db78.coll78 using new epoch 559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.110-0400 m30999| 2015-07-09T14:17:34.110-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db78.coll78: 0ms sequenceNumber: 338 version: 1|1||559ebabeca4787b9985d1f34 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.166-0400 m30999| 2015-07-09T14:17:34.166-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db78.coll78: 0ms sequenceNumber: 339 version: 1|1||559ebabeca4787b9985d1f34 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.168-0400 m31100| 2015-07-09T14:17:34.168-0400 I SHARDING [conn182] remotely refreshing metadata for db78.coll78 with requested shard version 1|1||559ebabeca4787b9985d1f34, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.170-0400 m31100| 2015-07-09T14:17:34.169-0400 I SHARDING [conn182] collection db78.coll78 was previously unsharded, new metadata loaded with shard version 1|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.170-0400 m31100| 2015-07-09T14:17:34.169-0400 I SHARDING [conn182] collection version was loaded at version 1|1||559ebabeca4787b9985d1f34, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.170-0400 m30999| 2015-07-09T14:17:34.170-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:34.170-0400-559ebabeca4787b9985d1f35", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465854170), what: "shardCollection", ns: "db78.coll78", details: { version: "1|1||559ebabeca4787b9985d1f34" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.223-0400 m30999| 2015-07-09T14:17:34.223-0400 I SHARDING [conn1] distributed lock 'db78.coll78/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.225-0400 m30999| 2015-07-09T14:17:34.224-0400 I SHARDING [conn1] moving chunk ns: db78.coll78 moving ( ns: db78.coll78, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.225-0400 m31100| 2015-07-09T14:17:34.225-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.226-0400 m31100| 2015-07-09T14:17:34.225-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebabeca4787b9985d1f34') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.228-0400 m31100| 2015-07-09T14:17:34.228-0400 I SHARDING [conn38] distributed lock 'db78.coll78/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebabe792e00bb67274b07 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.228-0400 m31100| 2015-07-09T14:17:34.228-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:34.228-0400-559ebabe792e00bb67274b08", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465854228), what: "moveChunk.start", ns: "db78.coll78", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.282-0400 m31100| 2015-07-09T14:17:34.281-0400 I SHARDING [conn38] remotely refreshing metadata for db78.coll78 based on current shard version 1|1||559ebabeca4787b9985d1f34, current metadata version is 1|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.283-0400 m31100| 2015-07-09T14:17:34.283-0400 I SHARDING [conn38] metadata of collection db78.coll78 already up to date (shard version : 1|1||559ebabeca4787b9985d1f34, took 1ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.283-0400 m31100| 2015-07-09T14:17:34.283-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.284-0400 m31100| 2015-07-09T14:17:34.284-0400 I SHARDING [conn38] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.284-0400 m31200| 2015-07-09T14:17:34.284-0400 I SHARDING [conn16] remotely refreshing metadata for db78.coll78, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.286-0400 m31200| 2015-07-09T14:17:34.285-0400 I SHARDING [conn16] collection db78.coll78 was previously unsharded, new metadata loaded with shard version 0|0||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.286-0400 m31200| 2015-07-09T14:17:34.286-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebabeca4787b9985d1f34, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.286-0400 m31200| 2015-07-09T14:17:34.286-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db78.coll78 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.289-0400 m31100| 2015-07-09T14:17:34.288-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.292-0400 m31100| 2015-07-09T14:17:34.292-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.298-0400 m31100| 2015-07-09T14:17:34.297-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.308-0400 m31100| 2015-07-09T14:17:34.307-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.308-0400 m31200| 2015-07-09T14:17:34.308-0400 I INDEX [migrateThread] build index on: db78.coll78 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.309-0400 m31200| 2015-07-09T14:17:34.308-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.317-0400 m31200| 2015-07-09T14:17:34.315-0400 I INDEX [migrateThread] build index on: db78.coll78 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.317-0400 m31200| 2015-07-09T14:17:34.316-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.327-0400 m31100| 2015-07-09T14:17:34.325-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.327-0400 m31200| 2015-07-09T14:17:34.327-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.327-0400 m31200| 2015-07-09T14:17:34.327-0400 I SHARDING [migrateThread] Deleter starting delete for: db78.coll78 from { _id: 0 } -> { _id: MaxKey }, with opId: 105220 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.328-0400 m31200| 2015-07-09T14:17:34.327-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db78.coll78 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.333-0400 m31202| 2015-07-09T14:17:34.332-0400 I INDEX [repl writer worker 12] build index on: db78.coll78 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.333-0400 m31202| 2015-07-09T14:17:34.332-0400 I INDEX [repl writer worker 12] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.336-0400 m31201| 2015-07-09T14:17:34.335-0400 I INDEX [repl writer worker 14] build index on: db78.coll78 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.336-0400 m31201| 2015-07-09T14:17:34.335-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.342-0400 m31202| 2015-07-09T14:17:34.342-0400 I INDEX [repl writer worker 12] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.344-0400 m31200| 2015-07-09T14:17:34.343-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.344-0400 m31200| 2015-07-09T14:17:34.343-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db78.coll78' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.345-0400 m31201| 2015-07-09T14:17:34.344-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.360-0400 m31100| 2015-07-09T14:17:34.359-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.360-0400 m31100| 2015-07-09T14:17:34.359-0400 I SHARDING [conn38] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.360-0400 m31100| 2015-07-09T14:17:34.360-0400 I SHARDING [conn38] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.360-0400 m31100| 2015-07-09T14:17:34.360-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.367-0400 m31200| 2015-07-09T14:17:34.367-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db78.coll78' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.368-0400 m31200| 2015-07-09T14:17:34.367-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:34.367-0400-559ebabed5a107a5b9c0db82", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465854367), what: "moveChunk.to", ns: "db78.coll78", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 40, step 2 of 5: 15, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.421-0400 m31100| 2015-07-09T14:17:34.420-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.421-0400 m31100| 2015-07-09T14:17:34.420-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebabeca4787b9985d1f34 through { _id: MinKey } -> { _id: 0 } for collection 'db78.coll78' [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.423-0400 m31100| 2015-07-09T14:17:34.422-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:34.422-0400-559ebabe792e00bb67274b09", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465854422), what: "moveChunk.commit", ns: "db78.coll78", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.479-0400 m31100| 2015-07-09T14:17:34.479-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.480-0400 m31100| 2015-07-09T14:17:34.479-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.480-0400 m31100| 2015-07-09T14:17:34.479-0400 I SHARDING [conn38] Deleter starting delete for: db78.coll78 from { _id: 0 } -> { _id: MaxKey }, with opId: 243392 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:34.480-0400 m31100| 2015-07-09T14:17:34.479-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db78.coll78 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.480-0400 m31100| 2015-07-09T14:17:34.479-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.481-0400 m31100| 2015-07-09T14:17:34.480-0400 I SHARDING [conn38] distributed lock 'db78.coll78/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.481-0400 m31100| 2015-07-09T14:17:34.480-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:34.480-0400-559ebabe792e00bb67274b0a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465854480), what: "moveChunk.from", ns: "db78.coll78", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 2, step 4 of 6: 73, step 5 of 6: 119, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.534-0400 m31100| 2015-07-09T14:17:34.533-0400 I COMMAND [conn38] command db78.coll78 command: moveChunk { moveChunk: "db78.coll78", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebabeca4787b9985d1f34') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 308ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.536-0400 m30999| 2015-07-09T14:17:34.536-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db78.coll78: 0ms sequenceNumber: 340 version: 2|1||559ebabeca4787b9985d1f34 based on: 1|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.538-0400 m31100| 2015-07-09T14:17:34.537-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db78.coll78", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabeca4787b9985d1f34') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.542-0400 m31100| 2015-07-09T14:17:34.541-0400 I SHARDING [conn38] distributed lock 'db78.coll78/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebabe792e00bb67274b0b [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.542-0400 m31100| 2015-07-09T14:17:34.542-0400 I SHARDING [conn38] remotely refreshing metadata for db78.coll78 based on current shard version 2|0||559ebabeca4787b9985d1f34, current metadata version is 2|0||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.544-0400 m31100| 2015-07-09T14:17:34.543-0400 I SHARDING [conn38] updating metadata for db78.coll78 from shard version 2|0||559ebabeca4787b9985d1f34 to shard version 2|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.544-0400 m31100| 2015-07-09T14:17:34.544-0400 I 
SHARDING [conn38] collection version was loaded at version 2|1||559ebabeca4787b9985d1f34, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.544-0400 m31100| 2015-07-09T14:17:34.544-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.546-0400 m31100| 2015-07-09T14:17:34.545-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:34.545-0400-559ebabe792e00bb67274b0c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465854545), what: "split", ns: "db78.coll78", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebabeca4787b9985d1f34') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebabeca4787b9985d1f34') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.597-0400 m30999| 2015-07-09T14:17:34.596-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:34.589-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.597-0400 m31100| 2015-07-09T14:17:34.597-0400 I SHARDING [conn38] distributed lock 'db78.coll78/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.599-0400 m30999| 2015-07-09T14:17:34.599-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db78.coll78: 0ms sequenceNumber: 341 version: 2|3||559ebabeca4787b9985d1f34 based on: 2|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.600-0400 m31200| 2015-07-09T14:17:34.599-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db78.coll78", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabeca4787b9985d1f34') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.604-0400 m31200| 2015-07-09T14:17:34.603-0400 I SHARDING [conn18] distributed lock 'db78.coll78/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebabed5a107a5b9c0db83 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.604-0400 m31200| 2015-07-09T14:17:34.604-0400 I SHARDING [conn18] remotely refreshing metadata for db78.coll78 based on current shard version 0|0||559ebabeca4787b9985d1f34, current metadata version is 1|1||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.606-0400 m31200| 2015-07-09T14:17:34.605-0400 I SHARDING [conn18] updating metadata for db78.coll78 from shard version 0|0||559ebabeca4787b9985d1f34 to shard version 2|0||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.606-0400 m31200| 2015-07-09T14:17:34.606-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559ebabeca4787b9985d1f34, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.606-0400 m31200| 2015-07-09T14:17:34.606-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.607-0400 m31200| 2015-07-09T14:17:34.606-0400 I SHARDING [conn18] about to log metadata event: { _id: 
"bs-osx108-8-2015-07-09T14:17:34.606-0400-559ebabed5a107a5b9c0db84", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465854606), what: "split", ns: "db78.coll78", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559ebabeca4787b9985d1f34') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebabeca4787b9985d1f34') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.663-0400 m31200| 2015-07-09T14:17:34.663-0400 I SHARDING [conn18] distributed lock 'db78.coll78/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.665-0400 m30999| 2015-07-09T14:17:34.665-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db78.coll78: 0ms sequenceNumber: 342 version: 2|5||559ebabeca4787b9985d1f34 based on: 2|3||559ebabeca4787b9985d1f34 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.667-0400 m30999| 2015-07-09T14:17:34.667-0400 I SHARDING [conn1] sharded connection to test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 not being returned to the pool [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.668-0400 m30999| 2015-07-09T14:17:34.667-0400 I SHARDING [conn1] retrying command: { listIndexes: "coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.668-0400 m31100| 2015-07-09T14:17:34.667-0400 I NETWORK [conn182] end connection 127.0.0.1:63720 (118 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.681-0400 m31100| 2015-07-09T14:17:34.681-0400 I INDEX [conn45] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.682-0400 m31100| 2015-07-09T14:17:34.681-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.682-0400 m31200| 2015-07-09T14:17:34.682-0400 I INDEX [conn35] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.682-0400 m31200| 2015-07-09T14:17:34.682-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.688-0400 m31100| 2015-07-09T14:17:34.687-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.690-0400 m31200| 2015-07-09T14:17:34.690-0400 I INDEX [conn35] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.697-0400 m31102| 2015-07-09T14:17:34.696-0400 I INDEX [repl writer worker 11] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.697-0400 m31200| 2015-07-09T14:17:34.696-0400 I INDEX [conn35] build index on: db78.coll78 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.698-0400 m31102| 2015-07-09T14:17:34.696-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.698-0400 m31200| 2015-07-09T14:17:34.696-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.701-0400 m31100| 2015-07-09T14:17:34.700-0400 I INDEX [conn45] build index on: db78.coll78 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.701-0400 m31100| 2015-07-09T14:17:34.700-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.708-0400 m31101| 2015-07-09T14:17:34.708-0400 I INDEX [repl writer worker 0] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.709-0400 m31101| 2015-07-09T14:17:34.708-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.714-0400 m31202| 2015-07-09T14:17:34.713-0400 I INDEX [repl writer worker 5] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.715-0400 m31202| 2015-07-09T14:17:34.713-0400 I INDEX [repl writer worker 5] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.718-0400 m31102| 2015-07-09T14:17:34.718-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.718-0400 m31200| 2015-07-09T14:17:34.718-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.719-0400 m31201| 2015-07-09T14:17:34.718-0400 I INDEX [repl writer worker 0] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0 }, name: "x_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.719-0400 m31201| 2015-07-09T14:17:34.718-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.723-0400 m31100| 2015-07-09T14:17:34.722-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.723-0400 m31101| 2015-07-09T14:17:34.722-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.729-0400 m31202| 2015-07-09T14:17:34.727-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.735-0400 m31201| 2015-07-09T14:17:34.735-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.736-0400 m31200| 2015-07-09T14:17:34.735-0400 I INDEX [conn35] build index on: db78.coll78 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.736-0400 m31200| 2015-07-09T14:17:34.735-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.737-0400 m31100| 2015-07-09T14:17:34.737-0400 I INDEX [conn45] build index on: db78.coll78 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.738-0400 m31100| 2015-07-09T14:17:34.737-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.740-0400 m31102| 2015-07-09T14:17:34.740-0400 I INDEX [repl writer worker 3] build index on: db78.coll78 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.740-0400 m31102| 2015-07-09T14:17:34.740-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.749-0400 m31202| 2015-07-09T14:17:34.748-0400 I INDEX [repl writer worker 14] build index on: db78.coll78 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.749-0400 m31202| 2015-07-09T14:17:34.748-0400 I INDEX [repl writer worker 14] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.749-0400 m31101| 2015-07-09T14:17:34.748-0400 I INDEX [repl writer worker 9] build index on: db78.coll78 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.749-0400 m31101| 2015-07-09T14:17:34.748-0400 I INDEX [repl writer worker 9] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.756-0400 m31200| 2015-07-09T14:17:34.755-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.756-0400 m31100| 2015-07-09T14:17:34.756-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.756-0400 m31201| 2015-07-09T14:17:34.756-0400 I INDEX [repl writer worker 3] build index on: db78.coll78 properties: { v: 1, key: { y: 1.0 }, name: "y_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.757-0400 m31201| 2015-07-09T14:17:34.756-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.757-0400 m31102| 2015-07-09T14:17:34.757-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.760-0400 m31101| 2015-07-09T14:17:34.760-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.764-0400 m31100| 2015-07-09T14:17:34.764-0400 I INDEX [conn45] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.765-0400 m31100| 2015-07-09T14:17:34.764-0400 I INDEX [conn45] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.766-0400 m31202| 2015-07-09T14:17:34.766-0400 I INDEX [repl writer worker 14] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.766-0400 m31200| 2015-07-09T14:17:34.766-0400 I INDEX [conn35] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.767-0400 m31200| 2015-07-09T14:17:34.766-0400 I INDEX [conn35] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.772-0400 m31201| 2015-07-09T14:17:34.772-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.772-0400 m31100| 2015-07-09T14:17:34.772-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.778-0400 m31101| 2015-07-09T14:17:34.778-0400 I INDEX [repl writer worker 6] build index on: db78.coll78 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.779-0400 m31101| 2015-07-09T14:17:34.778-0400 I INDEX [repl writer worker 6] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.781-0400 m31102| 2015-07-09T14:17:34.781-0400 I INDEX [repl writer worker 4] build index on: db78.coll78 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.782-0400 m31102| 2015-07-09T14:17:34.781-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.783-0400 m31201| 2015-07-09T14:17:34.783-0400 I INDEX [repl writer worker 4] build index on: db78.coll78 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.784-0400 m31201| 2015-07-09T14:17:34.783-0400 I INDEX [repl writer worker 4] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.789-0400 m31101| 2015-07-09T14:17:34.789-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.789-0400 m31200| 2015-07-09T14:17:34.789-0400 I INDEX [conn35] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.794-0400 m31201| 2015-07-09T14:17:34.794-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.794-0400 m31102| 2015-07-09T14:17:34.794-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.796-0400 m31202| 2015-07-09T14:17:34.796-0400 I INDEX [repl writer worker 3] build index on: db78.coll78 properties: { v: 1, key: { z: 1.0 }, name: "z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.797-0400 m31202| 2015-07-09T14:17:34.796-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.800-0400 m31101| 2015-07-09T14:17:34.800-0400 I INDEX [repl writer worker 3] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.801-0400 m31101| 2015-07-09T14:17:34.800-0400 I INDEX [repl writer worker 3] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.807-0400 m31100| 2015-07-09T14:17:34.805-0400 I COMMAND [conn38] CMD: dropIndexes db78.coll78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.807-0400 m31200| 2015-07-09T14:17:34.805-0400 I COMMAND [conn18] CMD: dropIndexes db78.coll78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.807-0400 m31201| 2015-07-09T14:17:34.804-0400 I INDEX [repl writer worker 11] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.807-0400 m31201| 2015-07-09T14:17:34.805-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.809-0400 m31100| 2015-07-09T14:17:34.809-0400 I COMMAND [conn38] CMD: dropIndexes db78.coll78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.809-0400 m31200| 2015-07-09T14:17:34.809-0400 I COMMAND [conn18] CMD: dropIndexes db78.coll78 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.814-0400 m31202| 2015-07-09T14:17:34.813-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.814-0400 m31102| 2015-07-09T14:17:34.813-0400 I INDEX [repl writer worker 7] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db78.coll78" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.815-0400 m31201| 2015-07-09T14:17:34.813-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.815-0400 m31102| 2015-07-09T14:17:34.813-0400 I INDEX [repl writer worker 7] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.817-0400 m31101| 2015-07-09T14:17:34.817-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 
0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.819-0400 m31101| 2015-07-09T14:17:34.818-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.822-0400 m31202| 2015-07-09T14:17:34.819-0400 I INDEX [repl writer worker 10] build index on: db78.coll78 properties: { v: 1, key: { x: 1.0, y: 1.0, z: 1.0 }, name: "x_1_y_1_z_1", ns: "db78.coll78" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.823-0400 m31202| 2015-07-09T14:17:34.819-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.823-0400 m31100| 2015-07-09T14:17:34.821-0400 I COMMAND [conn38] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.823-0400 m31200| 2015-07-09T14:17:34.821-0400 I COMMAND [conn18] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.825-0400 m31102| 2015-07-09T14:17:34.825-0400 I INDEX [repl writer worker 7] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.827-0400 m31202| 2015-07-09T14:17:34.826-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.827-0400 m31201| 2015-07-09T14:17:34.826-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.829-0400 m31101| 2015-07-09T14:17:34.828-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.830-0400 m31100| 2015-07-09T14:17:34.829-0400 I COMMAND [conn38] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.830-0400 m31200| 2015-07-09T14:17:34.829-0400 I COMMAND [conn18] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.830-0400 m31102| 2015-07-09T14:17:34.830-0400 I COMMAND [repl writer worker 2] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.831-0400 m31201| 2015-07-09T14:17:34.830-0400 I COMMAND [repl writer worker 5] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.831-0400 m31202| 2015-07-09T14:17:34.831-0400 I COMMAND [repl writer worker 11] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.831-0400 m31101| 2015-07-09T14:17:34.831-0400 I COMMAND [repl writer worker 10] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.833-0400 m31201| 2015-07-09T14:17:34.832-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.833-0400 m31102| 2015-07-09T14:17:34.833-0400 I COMMAND [repl writer worker 15] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.834-0400 m31202| 2015-07-09T14:17:34.834-0400 I COMMAND [repl writer worker 7] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.834-0400 Using 10 threads (requested 10)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.850-0400 m31201| 2015-07-09T14:17:34.850-0400 I COMMAND [repl writer worker 12] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.851-0400 m31102| 2015-07-09T14:17:34.851-0400 I COMMAND [repl writer worker 6] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.853-0400 m31102| 2015-07-09T14:17:34.852-0400 I COMMAND [repl writer worker 9] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.884-0400 m31202| 2015-07-09T14:17:34.880-0400 I COMMAND [repl writer worker 13] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.933-0400 m31202| 2015-07-09T14:17:34.933-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.952-0400 m30998| 2015-07-09T14:17:34.951-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64115 #489 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.952-0400 m30999| 2015-07-09T14:17:34.951-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64116 #490 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.961-0400 m30998| 2015-07-09T14:17:34.961-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64118 #490 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.962-0400 m30998| 2015-07-09T14:17:34.962-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64119 #491 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.964-0400 m30998| 2015-07-09T14:17:34.963-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64121 #492 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.965-0400 m30999| 2015-07-09T14:17:34.964-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64117 #491 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.966-0400 m30999| 2015-07-09T14:17:34.965-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64120 #492 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.967-0400 m30999| 2015-07-09T14:17:34.967-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64122 #493 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.971-0400 m30998| 2015-07-09T14:17:34.971-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64123 #493 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.973-0400 m30999| 2015-07-09T14:17:34.972-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64124 #494 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.981-0400 setting random seed: 3049302664585
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.981-0400 setting random seed: 8967394507490
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.981-0400 setting random seed: 2204465372487
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.981-0400 setting random seed: 8974418351426
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.981-0400 setting random seed: 7564049167558
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.982-0400 setting random seed: 9856226132251
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.983-0400 setting random seed: 9629907547496
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.988-0400 m30998| 2015-07-09T14:17:34.988-0400 I SHARDING [conn492] ChunkManager: time to load chunks for db78.coll78: 0ms sequenceNumber: 96 version: 2|5||559ebabeca4787b9985d1f34 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.991-0400 setting random seed: 3998272586613
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.994-0400 setting random seed: 3592681884765
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:34.995-0400 setting random seed: 4999557565897
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.053-0400 m30999| 2015-07-09T14:17:35.053-0400 I NETWORK [conn492] end connection 127.0.0.1:64120 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.059-0400 m30999| 2015-07-09T14:17:35.056-0400 I NETWORK [conn493] end connection 127.0.0.1:64122 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.077-0400 m30999| 2015-07-09T14:17:35.076-0400 I NETWORK [conn494] end connection 127.0.0.1:64124 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.085-0400 m30999| 2015-07-09T14:17:35.085-0400 I NETWORK [conn490] end connection 127.0.0.1:64116 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.094-0400 m30998| 2015-07-09T14:17:35.094-0400 I NETWORK [conn489] end connection 127.0.0.1:64115 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.097-0400 m30998| 2015-07-09T14:17:35.097-0400 I NETWORK [conn493] end connection 127.0.0.1:64123 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.101-0400 m30999| 2015-07-09T14:17:35.100-0400 I NETWORK [conn491] end connection 127.0.0.1:64117 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.103-0400 m30998| 2015-07-09T14:17:35.101-0400 I NETWORK [conn491] end connection 127.0.0.1:64119 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.116-0400 m30998| 2015-07-09T14:17:35.115-0400 I NETWORK [conn490] end connection 127.0.0.1:64118 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.120-0400 m30998| 2015-07-09T14:17:35.118-0400 I NETWORK [conn492] end connection 127.0.0.1:64121 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.141-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400 jstests/concurrency/fsm_workloads/update_multifield_noindex.js: Workload completed in 307 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400 m30999| 2015-07-09T14:17:35.142-0400 I COMMAND [conn1] DROP: db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.142-0400 m30999| 2015-07-09T14:17:35.142-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:35.142-0400-559ebabfca4787b9985d1f36", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465855142), what: "dropCollection.start", ns: "db78.coll78", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.199-0400 m30999| 2015-07-09T14:17:35.198-0400 I SHARDING [conn1] distributed lock 'db78.coll78/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebabfca4787b9985d1f37
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.200-0400 m31100| 2015-07-09T14:17:35.199-0400 I COMMAND [conn38] CMD: drop db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.201-0400 m31101| 2015-07-09T14:17:35.201-0400 I COMMAND [repl writer worker 1] CMD: dropIndexes db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.203-0400 m31200| 2015-07-09T14:17:35.202-0400 I COMMAND [conn18] CMD: drop db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.204-0400 m31101| 2015-07-09T14:17:35.204-0400 I COMMAND [repl writer worker 4] CMD: drop db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.204-0400 m31102| 2015-07-09T14:17:35.204-0400 I COMMAND [repl writer worker 12] CMD: drop db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.206-0400 m31201| 2015-07-09T14:17:35.206-0400 I COMMAND [repl writer worker 5] CMD: drop db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.207-0400 m31202| 2015-07-09T14:17:35.207-0400 I COMMAND [repl writer worker 14] CMD: drop db78.coll78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.258-0400 m31100| 2015-07-09T14:17:35.258-0400 I SHARDING [conn37] remotely refreshing metadata for db78.coll78 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebabeca4787b9985d1f34, current metadata version is 2|3||559ebabeca4787b9985d1f34
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.260-0400 m31100| 2015-07-09T14:17:35.259-0400 W SHARDING [conn37] no chunks found when reloading db78.coll78, previous version was 0|0||559ebabeca4787b9985d1f34, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.260-0400 m31100| 2015-07-09T14:17:35.259-0400 I SHARDING [conn37] dropping metadata for db78.coll78 at shard version 2|3||559ebabeca4787b9985d1f34, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.261-0400 m31200| 2015-07-09T14:17:35.261-0400 I SHARDING [conn63] remotely refreshing metadata for db78.coll78 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebabeca4787b9985d1f34, current metadata version is 2|5||559ebabeca4787b9985d1f34
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.263-0400 m31200| 2015-07-09T14:17:35.263-0400 W SHARDING [conn63] no chunks found when reloading db78.coll78, previous version was 0|0||559ebabeca4787b9985d1f34, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.263-0400 m31200| 2015-07-09T14:17:35.263-0400 I SHARDING [conn63] dropping metadata for db78.coll78 at shard version 2|5||559ebabeca4787b9985d1f34, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.265-0400 m30999| 2015-07-09T14:17:35.264-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:35.264-0400-559ebabfca4787b9985d1f38", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465855264), what: "dropCollection", ns: "db78.coll78", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.318-0400 m30999| 2015-07-09T14:17:35.318-0400 I SHARDING [conn1] distributed lock 'db78.coll78/bs-osx108-8:30999:1436464534:16807' unlocked.
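The sequence above is the mongos-side teardown of a sharded collection: log a dropCollection.start changelog event, take the collection's distributed lock, have each shard primary drop its piece (the replica-set secondaries replay the drop), clear the per-shard metadata, log dropCollection, and release the lock. A minimal shell sketch of the client call that triggers all of it, assuming a mongos at localhost:30999 as in this cluster:

    // Hedged sketch: drop a sharded collection through mongos; the server-side
    // steps it triggers are the m30999/m31100/m31200 log lines above.
    var conn = new Mongo("localhost:30999");  // assumed mongos address
    assert(conn.getDB("db78").coll78.drop()); // returns true once the drop completes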
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.374-0400 m30999| 2015-07-09T14:17:35.373-0400 I COMMAND [conn1] DROP DATABASE: db78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.374-0400 m30999| 2015-07-09T14:17:35.373-0400 I SHARDING [conn1] DBConfig::dropDatabase: db78
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.374-0400 m30999| 2015-07-09T14:17:35.373-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:35.373-0400-559ebabfca4787b9985d1f39", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465855373), what: "dropDatabase.start", ns: "db78", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.480-0400 m30999| 2015-07-09T14:17:35.480-0400 I SHARDING [conn1] DBConfig::dropDatabase: db78 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.480-0400 m31100| 2015-07-09T14:17:35.480-0400 I COMMAND [conn157] dropDatabase db78 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.481-0400 m31100| 2015-07-09T14:17:35.480-0400 I COMMAND [conn157] dropDatabase db78 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.481-0400 m30999| 2015-07-09T14:17:35.481-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:35.481-0400-559ebabfca4787b9985d1f3a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465855481), what: "dropDatabase", ns: "db78", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.482-0400 m31101| 2015-07-09T14:17:35.482-0400 I COMMAND [repl writer worker 7] dropDatabase db78 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.482-0400 m31102| 2015-07-09T14:17:35.482-0400 I COMMAND [repl writer worker 13] dropDatabase db78 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.482-0400 m31101| 2015-07-09T14:17:35.482-0400 I COMMAND [repl writer worker 7] dropDatabase db78 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.482-0400 m31102| 2015-07-09T14:17:35.482-0400 I COMMAND [repl writer worker 13] dropDatabase db78 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.570-0400 m31100| 2015-07-09T14:17:35.569-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.574-0400 m31102| 2015-07-09T14:17:35.573-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.574-0400 m31101| 2015-07-09T14:17:35.573-0400 I COMMAND [repl writer worker 12] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.610-0400 m31200| 2015-07-09T14:17:35.609-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.612-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.612-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.612-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.613-0400 jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.613-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.613-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.613-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.613-0400 m31201| 2015-07-09T14:17:35.613-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.613-0400 m31202| 2015-07-09T14:17:35.613-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.619-0400 m30999| 2015-07-09T14:17:35.619-0400 I SHARDING [conn1] distributed lock 'db79/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebabfca4787b9985d1f3b
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.623-0400 m30999| 2015-07-09T14:17:35.623-0400 I SHARDING [conn1] Placing [db79] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.623-0400 m30999| 2015-07-09T14:17:35.623-0400 I SHARDING [conn1] Enabling sharding for database [db79] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.678-0400 m30999| 2015-07-09T14:17:35.677-0400 I SHARDING [conn1] distributed lock 'db79/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.707-0400 m31100| 2015-07-09T14:17:35.705-0400 I INDEX [conn22] build index on: db79.coll79 properties: { v: 1, key: { indexed_insert_upsert: 1.0 }, name: "indexed_insert_upsert_1", ns: "db79.coll79" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.707-0400 m31100| 2015-07-09T14:17:35.705-0400 I INDEX [conn22] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.713-0400 m31100| 2015-07-09T14:17:35.713-0400 I INDEX [conn22] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.714-0400 m30999| 2015-07-09T14:17:35.714-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db79.coll79", key: { indexed_insert_upsert: 1.0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.718-0400 m30999| 2015-07-09T14:17:35.718-0400 I SHARDING [conn1] distributed lock 'db79.coll79/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebabfca4787b9985d1f3c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.720-0400 m30999| 2015-07-09T14:17:35.719-0400 I SHARDING [conn1] enable sharding on: db79.coll79 with shard key: { indexed_insert_upsert: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.721-0400 m30999| 2015-07-09T14:17:35.719-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:35.719-0400-559ebabfca4787b9985d1f3d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465855719), what: "shardCollection.start", ns: "db79.coll79", details: { shardKey: { indexed_insert_upsert: 1.0 }, collection: "db79.coll79", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 1 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.730-0400 m31101| 2015-07-09T14:17:35.728-0400 I INDEX [repl writer worker 15] build index on: db79.coll79 properties: { v: 1, key: { indexed_insert_upsert: 1.0 }, name: "indexed_insert_upsert_1", ns: "db79.coll79" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.730-0400 m31101| 2015-07-09T14:17:35.729-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.735-0400 m31101| 2015-07-09T14:17:35.735-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.735-0400 m31102| 2015-07-09T14:17:35.735-0400 I INDEX [repl writer worker 8] build index on: db79.coll79 properties: { v: 1, key: { indexed_insert_upsert: 1.0 }, name: "indexed_insert_upsert_1", ns: "db79.coll79" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.735-0400 m31102| 2015-07-09T14:17:35.735-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.740-0400 m31102| 2015-07-09T14:17:35.740-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.773-0400 m30999| 2015-07-09T14:17:35.773-0400 I SHARDING [conn1] going to create 1 chunk(s) for: db79.coll79 using new epoch 559ebabfca4787b9985d1f3e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.827-0400 m30999| 2015-07-09T14:17:35.827-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db79.coll79: 0ms sequenceNumber: 343 version: 1|0||559ebabfca4787b9985d1f3e based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.883-0400 m30999| 2015-07-09T14:17:35.883-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db79.coll79: 0ms sequenceNumber: 344 version: 1|0||559ebabfca4787b9985d1f3e based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.885-0400 m31100| 2015-07-09T14:17:35.885-0400 I SHARDING [conn188] remotely refreshing metadata for db79.coll79 with requested shard version 1|0||559ebabfca4787b9985d1f3e, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.887-0400 m31100| 2015-07-09T14:17:35.886-0400 I SHARDING [conn188] collection db79.coll79 was previously unsharded, new metadata loaded with shard version 1|0||559ebabfca4787b9985d1f3e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.887-0400 m31100| 2015-07-09T14:17:35.886-0400 I SHARDING [conn188] collection version was loaded at version 1|0||559ebabfca4787b9985d1f3e, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.887-0400 m30999| 2015-07-09T14:17:35.886-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:35.886-0400-559ebabfca4787b9985d1f3f", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465855886), what: "shardCollection", ns: "db79.coll79", details: { version: "1|0||559ebabfca4787b9985d1f3e" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.941-0400 m30999| 2015-07-09T14:17:35.941-0400 I SHARDING [conn1] distributed lock 'db79.coll79/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.966-0400 m31200| 2015-07-09T14:17:35.964-0400 I INDEX [conn39] build index on: db79.coll79 properties: { v: 1, key: { indexed_insert_upsert: 1.0 }, name: "indexed_insert_upsert_1", ns: "db79.coll79" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.966-0400 m31200| 2015-07-09T14:17:35.964-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.981-0400 m31200| 2015-07-09T14:17:35.980-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:35.982-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.085-0400 m30998| 2015-07-09T14:17:36.085-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64127 #494 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.114-0400 m30998| 2015-07-09T14:17:36.102-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64128 #495 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.114-0400 m30998| 2015-07-09T14:17:36.113-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:36.037-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.115-0400 m31202| 2015-07-09T14:17:36.112-0400 I INDEX [repl writer worker 3] build index on: db79.coll79 properties: { v: 1, key: { indexed_insert_upsert: 1.0 }, name: "indexed_insert_upsert_1", ns: "db79.coll79" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.115-0400 m31202| 2015-07-09T14:17:36.113-0400 I INDEX [repl writer worker 3] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.116-0400 m30999| 2015-07-09T14:17:36.113-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64126 #495 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.116-0400 m31201| 2015-07-09T14:17:36.116-0400 I INDEX [repl writer worker 6] build index on: db79.coll79 properties: { v: 1, key: { indexed_insert_upsert: 1.0 }, name: "indexed_insert_upsert_1", ns: "db79.coll79" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.116-0400 m31201| 2015-07-09T14:17:36.116-0400 I INDEX [repl writer worker 6] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.123-0400 m30998| 2015-07-09T14:17:36.123-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64129 #496 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.127-0400 m30999| 2015-07-09T14:17:36.123-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64130 #496 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.145-0400 m31201| 2015-07-09T14:17:36.145-0400 I INDEX [repl writer worker 6] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.149-0400 m31202| 2015-07-09T14:17:36.149-0400 I INDEX [repl writer worker 3] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.150-0400 m30999| 2015-07-09T14:17:36.149-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64131 #497 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.152-0400 m30999| 2015-07-09T14:17:36.152-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64132 #498 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.156-0400 m30998| 2015-07-09T14:17:36.156-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64133 #497 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.157-0400 m30998| 2015-07-09T14:17:36.157-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64134 #498 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.171-0400 m30999| 2015-07-09T14:17:36.171-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64135 #499 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.177-0400 m30999| 2015-07-09T14:17:36.177-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64136 #500 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.204-0400 m30999| 2015-07-09T14:17:36.198-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64137 #501 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.218-0400 m30998| 2015-07-09T14:17:36.217-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64138 #499 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.219-0400 m30998| 2015-07-09T14:17:36.219-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64140 #500 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.220-0400 m30999| 2015-07-09T14:17:36.219-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64139 #502 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.222-0400 m30998| 2015-07-09T14:17:36.221-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64142 #501 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.228-0400 m30998| 2015-07-09T14:17:36.223-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64144 #502 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.228-0400 m30999| 2015-07-09T14:17:36.226-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64141 #503 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.228-0400 m30999| 2015-07-09T14:17:36.226-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64143 #504 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.229-0400 m30998| 2015-07-09T14:17:36.229-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64145 #503 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.238-0400 setting random seed: 2187500051222
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.238-0400 setting random seed: 449532656930
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.238-0400 setting random seed: 8923645718023
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.241-0400 setting random seed: 5295368544757
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.241-0400 setting random seed: 9259173176251
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.241-0400 setting random seed: 8900809357874
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.243-0400 setting random seed: 9456972079351
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.245-0400 setting random seed: 5902947448194
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.248-0400 m30998| 2015-07-09T14:17:36.247-0400 I SHARDING [conn495] ChunkManager: time to load chunks for db79.coll79: 0ms sequenceNumber: 97 version: 1|0||559ebabfca4787b9985d1f3e based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.250-0400 setting random seed: 7777817877940
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.252-0400 setting random seed: 7360178446397
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.253-0400 setting random seed: 9713652650825
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.254-0400 setting random seed: 1038983012549
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.255-0400 setting random seed: 2064483715221
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.258-0400 setting random seed: 2519206199795
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.260-0400 setting random seed: 5313378018327
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.262-0400 setting random seed: 1944395606406
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.265-0400 m31100| 2015-07-09T14:17:36.265-0400 I SHARDING [conn37] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.267-0400 m31100| 2015-07-09T14:17:36.265-0400 I SHARDING [conn40] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.268-0400 setting random seed: 298619670793
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.269-0400 setting random seed: 837807995267
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.273-0400 m31100| 2015-07-09T14:17:36.272-0400 I SHARDING [conn35] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.273-0400 m31100| 2015-07-09T14:17:36.272-0400 I SHARDING [conn40] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.274-0400 m31100| 2015-07-09T14:17:36.273-0400 I SHARDING [conn35] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.274-0400 m31100| 2015-07-09T14:17:36.273-0400 I SHARDING [conn132] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.274-0400 m31100| 2015-07-09T14:17:36.274-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.275-0400 m31100| 2015-07-09T14:17:36.274-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.279-0400 setting random seed: 3329811673611
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.279-0400 m31100| 2015-07-09T14:17:36.278-0400 I SHARDING [conn37] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.282-0400 m31100| 2015-07-09T14:17:36.279-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.282-0400 m31100| 2015-07-09T14:17:36.281-0400 I SHARDING [conn32] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.282-0400 m31100| 2015-07-09T14:17:36.281-0400 I SHARDING [conn34] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.283-0400 m31100| 2015-07-09T14:17:36.282-0400 I SHARDING [conn15] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.284-0400 m31100| 2015-07-09T14:17:36.282-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.284-0400 m31100| 2015-07-09T14:17:36.283-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.284-0400 m31100| 2015-07-09T14:17:36.283-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.285-0400 m31100| 2015-07-09T14:17:36.284-0400 I SHARDING [conn32] could not acquire lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.287-0400 m31100| 2015-07-09T14:17:36.284-0400 I SHARDING [conn32] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.288-0400 m31100| 2015-07-09T14:17:36.284-0400 W SHARDING [conn32] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.288-0400 m31100| 2015-07-09T14:17:36.285-0400 I SHARDING [conn40] could not acquire lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.288-0400 m31100| 2015-07-09T14:17:36.285-0400 I SHARDING [conn40] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.289-0400 m31100| 2015-07-09T14:17:36.285-0400 W SHARDING [conn40] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.291-0400 m30998| 2015-07-09T14:17:36.285-0400 W SHARDING [conn497] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.291-0400 m30999| 2015-07-09T14:17:36.285-0400 W SHARDING [conn495] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.291-0400 m31100| 2015-07-09T14:17:36.285-0400 I SHARDING [conn37] could not acquire lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.291-0400 m31100| 2015-07-09T14:17:36.285-0400 I SHARDING [conn37] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.291-0400 m31100| 2015-07-09T14:17:36.285-0400 W SHARDING [conn37] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
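The lines above also show the setup for the next workload: mongos takes the db79 lock, places the database on test-rs0, builds the shard-key index, and runs shardcollection, creating a single chunk under epoch 559ebabfca4787b9985d1f3e. Once the 20 inserter threads start writing, each mongos independently asks the shard to split that one { MinKey } -->> { MaxKey } chunk, and every attempt but one loses the race for the collection's distributed lock, which is the code 125 noise here. A minimal shell sketch of the same setup steps, assuming a mongos at localhost:30999; the names mirror the log:

    // Hedged sketch of the setup sequence recorded in the m30999 lines above.
    var mongos = new Mongo("localhost:30999");          // assumed mongos address
    var admin = mongos.getDB("admin");
    assert.commandWorked(admin.runCommand({ enableSharding: "db79" }));
    // Build the shard-key index, then shard the collection on it.
    mongos.getDB("db79").coll79.createIndex({ indexed_insert_upsert: 1 });
    assert.commandWorked(admin.runCommand(
        { shardCollection: "db79.coll79", key: { indexed_insert_upsert: 1 } }));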
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.292-0400 m31100| 2015-07-09T14:17:36.286-0400 I SHARDING [conn34] could not acquire lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.292-0400 m31100| 2015-07-09T14:17:36.286-0400 I SHARDING [conn34] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.292-0400 m31100| 2015-07-09T14:17:36.286-0400 W SHARDING [conn34] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.292-0400 m31100| 2015-07-09T14:17:36.286-0400 I SHARDING [conn39] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.293-0400 m30999| 2015-07-09T14:17:36.285-0400 W SHARDING [conn501] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.293-0400 m30999| 2015-07-09T14:17:36.287-0400 W SHARDING [conn496] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.294-0400 m31100| 2015-07-09T14:17:36.287-0400 I SHARDING [conn132] could not acquire lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.294-0400 m31100| 2015-07-09T14:17:36.287-0400 I SHARDING [conn132] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.295-0400 m31100| 2015-07-09T14:17:36.287-0400 W SHARDING [conn132] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.295-0400 m31100| 2015-07-09T14:17:36.287-0400 I SHARDING [conn15] could not acquire lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' (another update won)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.295-0400 m31100| 2015-07-09T14:17:36.287-0400 I SHARDING [conn15] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' was not acquired.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.296-0400 m31100| 2015-07-09T14:17:36.287-0400 W SHARDING [conn15] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.297-0400 m30998| 2015-07-09T14:17:36.288-0400 W SHARDING [conn494] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.298-0400 m31100| 2015-07-09T14:17:36.288-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 8.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.298-0400 m30999| 2015-07-09T14:17:36.288-0400 W SHARDING [conn503] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 9.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.298-0400 m31100| 2015-07-09T14:17:36.289-0400 W SHARDING [conn39] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.300-0400 m30998| 2015-07-09T14:17:36.289-0400 W SHARDING [conn496] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 8.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.300-0400 m31100| 2015-07-09T14:17:36.291-0400 I SHARDING [conn39] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.301-0400 m31100| 2015-07-09T14:17:36.292-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.301-0400 m31100| 2015-07-09T14:17:36.294-0400 W SHARDING [conn39] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.301-0400 m30998| 2015-07-09T14:17:36.294-0400 W SHARDING [conn499] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.302-0400 m31100| 2015-07-09T14:17:36.297-0400 I SHARDING [conn35] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebac0792e00bb67274b0e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.302-0400 m31100| 2015-07-09T14:17:36.297-0400 I SHARDING [conn35] remotely refreshing metadata for db79.coll79 based on current shard version 1|0||559ebabfca4787b9985d1f3e, current metadata version is 1|0||559ebabfca4787b9985d1f3e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.304-0400 setting random seed: 4144824445247
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.305-0400 m31100| 2015-07-09T14:17:36.305-0400 I SHARDING [conn35] metadata of collection db79.coll79 already up to date (shard version : 1|0||559ebabfca4787b9985d1f3e, took 0ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.306-0400 m31100| 2015-07-09T14:17:36.305-0400 I SHARDING [conn35] splitChunk accepted at version 1|0||559ebabfca4787b9985d1f3e
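Only conn35's request survives the race: it acquires the distributed lock, confirms its metadata is already current, and the split is accepted at version 1|0; the multi-split changelog entries further down record each resulting chunk. One way to see the outcome from the client side is to read the chunk map that mongos maintains in the config database (a hedged sketch, same assumed mongos address as above):

    // Hedged sketch: list db79.coll79's chunks after the winning multi-split.
    var configDB = new Mongo("localhost:30999").getDB("config");
    configDB.chunks.find({ ns: "db79.coll79" }).sort({ min: 1 }).forEach(printjson);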
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.310-0400 m31100| 2015-07-09T14:17:36.305-0400 I SHARDING [conn15] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.310-0400 m31100| 2015-07-09T14:17:36.305-0400 I SHARDING [conn37] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.310-0400 m31100| 2015-07-09T14:17:36.306-0400 I SHARDING [conn34] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.311-0400 m31100| 2015-07-09T14:17:36.307-0400 I SHARDING [conn37] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 13.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.311-0400 m31100| 2015-07-09T14:17:36.307-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 13.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.312-0400 m31100| 2015-07-09T14:17:36.308-0400 I SHARDING [conn15] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 13.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.317-0400 m31100| 2015-07-09T14:17:36.316-0400 I SHARDING [conn132] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.318-0400 m31100| 2015-07-09T14:17:36.316-0400 I SHARDING [conn39] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.319-0400 m31100| 2015-07-09T14:17:36.316-0400 I SHARDING [conn187] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.319-0400 m31100| 2015-07-09T14:17:36.317-0400 I SHARDING [conn40] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.324-0400 m31100| 2015-07-09T14:17:36.317-0400 I SHARDING [conn40] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.325-0400 m31100| 2015-07-09T14:17:36.318-0400 I SHARDING [conn132] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.326-0400 m31100| 2015-07-09T14:17:36.318-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.326-0400 m31100| 2015-07-09T14:17:36.320-0400 I SHARDING [conn32] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.326-0400 m31100| 2015-07-09T14:17:36.320-0400 I SHARDING [conn187] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.326-0400 m31100| 2015-07-09T14:17:36.321-0400 I SHARDING [conn32] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 8.0 }, { indexed_insert_upsert: 11.0 }, { indexed_insert_upsert: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.327-0400 m30999| 2015-07-09T14:17:36.322-0400 W SHARDING [conn499] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.327-0400 m31100| 2015-07-09T14:17:36.322-0400 W SHARDING [conn187] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.327-0400 m31100| 2015-07-09T14:17:36.322-0400 W SHARDING [conn15] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.328-0400 m31100| 2015-07-09T14:17:36.322-0400 W SHARDING [conn40] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.328-0400 m31100| 2015-07-09T14:17:36.322-0400 W SHARDING [conn32] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.328-0400 m30999| 2015-07-09T14:17:36.322-0400 W SHARDING [conn496] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 13.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.329-0400 m30999| 2015-07-09T14:17:36.322-0400 W SHARDING [conn498] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.329-0400 m30998| 2015-07-09T14:17:36.323-0400 W SHARDING [conn497] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 8.0 }, { indexed_insert_upsert: 11.0 }, { indexed_insert_upsert: 14.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.329-0400 m31100| 2015-07-09T14:17:36.323-0400 W SHARDING [conn132] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.330-0400 m30998| 2015-07-09T14:17:36.323-0400 W SHARDING [conn502] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.330-0400 m31100| 2015-07-09T14:17:36.324-0400 W SHARDING [conn37] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.331-0400 m31100| 2015-07-09T14:17:36.324-0400 W SHARDING [conn39] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.335-0400 m30998| 2015-07-09T14:17:36.324-0400 W SHARDING [conn501] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 16.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.336-0400 m30999| 2015-07-09T14:17:36.324-0400 W SHARDING [conn495] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 13.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.337-0400 m30999| 2015-07-09T14:17:36.324-0400 W SHARDING [conn503] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 4.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 13.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.342-0400 m31100| 2015-07-09T14:17:36.324-0400 W SHARDING [conn34] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.342-0400 m30999| 2015-07-09T14:17:36.327-0400 I SHARDING [conn495] ChunkManager: time to load chunks for db79.coll79: 0ms sequenceNumber: 345 version: 1|3||559ebabfca4787b9985d1f3e based on: 1|0||559ebabfca4787b9985d1f3e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.343-0400 m31100| 2015-07-09T14:17:36.331-0400 I SHARDING [conn35] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:36.331-0400-559ebac0792e00bb67274b15", server: "bs-osx108-8", clientAddr: "127.0.0.1:62637", time: new Date(1436465856331), what: "multi-split", ns: "db79.coll79", details: { before: { min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey } }, number: 1, of: 3, chunk: { min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: 0.0 }, lastmod: Timestamp 1000|1, lastmodEpoch: ObjectId('559ebabfca4787b9985d1f3e') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.343-0400 m31100| 2015-07-09T14:17:36.331-0400 I SHARDING [conn36] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.343-0400 m31100| 2015-07-09T14:17:36.331-0400 I SHARDING [conn34] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.345-0400 m31100| 2015-07-09T14:17:36.332-0400 I SHARDING [conn36] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 3.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 15.0 }, { indexed_insert_upsert: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.346-0400 m31100| 2015-07-09T14:17:36.332-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 3.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 15.0 }, { indexed_insert_upsert: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.346-0400 m31100| 2015-07-09T14:17:36.332-0400 I SHARDING [conn39] request split points lookup for chunk db79.coll79 { : MinKey } -->> { : MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.346-0400 m31100| 2015-07-09T14:17:36.334-0400 I SHARDING [conn39] received splitChunk request: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 3.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 15.0 }, { indexed_insert_upsert: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.347-0400 m30998| 2015-07-09T14:17:36.334-0400 W SHARDING [conn498] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 3.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 15.0 }, { indexed_insert_upsert: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.347-0400 m30998| 2015-07-09T14:17:36.334-0400 I SHARDING [conn501] ChunkManager: time to load chunks for db79.coll79: 1ms sequenceNumber: 98 version: 1|3||559ebabfca4787b9985d1f3e based on: 1|0||559ebabfca4787b9985d1f3e
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.347-0400 m31100| 2015-07-09T14:17:36.334-0400 W SHARDING [conn36] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.348-0400 m30999| 2015-07-09T14:17:36.335-0400 W SHARDING [conn504] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 3.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 15.0 }, { indexed_insert_upsert: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.348-0400 m31100| 2015-07-09T14:17:36.334-0400 W SHARDING [conn34] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.349-0400 m30998| 2015-07-09T14:17:36.338-0400 W SHARDING [conn500] splitChunk failed - cmd: { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 3.0 }, { indexed_insert_upsert: 6.0 }, { indexed_insert_upsert: 9.0 }, { indexed_insert_upsert: 12.0 }, { indexed_insert_upsert: 15.0 }, { indexed_insert_upsert: 19.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } result: { ok: 0.0, errmsg: "could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, {...", code: 125 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.349-0400 m31100| 2015-07-09T14:17:36.338-0400 W SHARDING [conn39] could not acquire collection lock for db79.coll79 to split chunk [{ : MinKey },{ : MaxKey }) :: caused by :: Lock for splitting chunk [{ : MinKey }, { : MaxKey }) in db79.coll79 is taken. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.396-0400 m31100| 2015-07-09T14:17:36.394-0400 I SHARDING [conn35] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:36.394-0400-559ebac0792e00bb67274b16", server: "bs-osx108-8", clientAddr: "127.0.0.1:62637", time: new Date(1436465856394), what: "multi-split", ns: "db79.coll79", details: { before: { min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey } }, number: 2, of: 3, chunk: { min: { indexed_insert_upsert: 0.0 }, max: { indexed_insert_upsert: 12.0 }, lastmod: Timestamp 1000|2, lastmodEpoch: ObjectId('559ebabfca4787b9985d1f3e') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.448-0400 m31100| 2015-07-09T14:17:36.447-0400 I SHARDING [conn35] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:36.447-0400-559ebac0792e00bb67274b17", server: "bs-osx108-8", clientAddr: "127.0.0.1:62637", time: new Date(1436465856447), what: "multi-split", ns: "db79.coll79", details: { before: { min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey } }, number: 3, of: 3, chunk: { min: { indexed_insert_upsert: 12.0 }, max: { indexed_insert_upsert: MaxKey }, lastmod: Timestamp 1000|3, lastmodEpoch: ObjectId('559ebabfca4787b9985d1f3e') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.523-0400 m31100| 2015-07-09T14:17:36.521-0400 I SHARDING [conn35] distributed lock 'db79.coll79/bs-osx108-8:31100:1436464536:197041335' unlocked. 
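[editor's note] While the losers were bouncing off the lock, one splitter held it the whole time: conn35 logs the three "multi-split" changelog events above (chunk 1 of 3 through 3 of 3), bumping the chunk versions to 1|1 through 1|3 under epoch 559ebabfca4787b9985d1f3e, then releases the distributed lock; the splitChunk command summary and the mongos "autosplitted ... into 3 (splitThreshold 921)" acknowledgement follow below. The resulting metadata is ordinary data in the config database and can be read back directly; a sketch (run through a mongos of this cluster, names from the log):

  // Inspect what the winning splitter recorded.
  var cfg = db.getSiblingDB("config");
  cfg.changelog.find({ what: "multi-split", ns: "db79.coll79" }).sort({ time: 1 });
  cfg.chunks.find({ ns: "db79.coll79" }).sort({ min: 1 });  // 3 chunks, versions 1|1..1|3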
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.524-0400 m31100| 2015-07-09T14:17:36.521-0400 I COMMAND [conn35] command db79.coll79 command: splitChunk { splitChunk: "db79.coll79", keyPattern: { indexed_insert_upsert: 1.0 }, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey }, from: "test-rs0", splitKeys: [ { indexed_insert_upsert: 0.0 }, { indexed_insert_upsert: 12.0 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebabfca4787b9985d1f3e') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 4, w: 2 } }, Database: { acquireCount: { r: 1, w: 2 } }, Collection: { acquireCount: { r: 1, W: 2 }, acquireWaitCount: { W: 2 }, timeAcquiringMicros: { W: 16198 } } } protocol:op_command 248ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.524-0400 m30998| 2015-07-09T14:17:36.522-0400 I SHARDING [conn495] autosplitted db79.coll79 shard: ns: db79.coll79, shard: test-rs0, lastmod: 1|0||000000000000000000000000, min: { indexed_insert_upsert: MinKey }, max: { indexed_insert_upsert: MaxKey } into 3 (splitThreshold 921) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.732-0400 m31100| 2015-07-09T14:17:36.731-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64146 #198 (119 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.741-0400 m30998| 2015-07-09T14:17:36.740-0400 I NETWORK [conn494] end connection 127.0.0.1:64127 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.759-0400 m30999| 2015-07-09T14:17:36.758-0400 I NETWORK [conn495] end connection 127.0.0.1:64126 (10 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.769-0400 m30998| 2015-07-09T14:17:36.768-0400 I NETWORK [conn496] end connection 127.0.0.1:64129 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.783-0400 m30999| 2015-07-09T14:17:36.782-0400 I NETWORK [conn496] end connection 127.0.0.1:64130 (9 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.823-0400 m30998| 2015-07-09T14:17:36.823-0400 I NETWORK [conn498] end connection 127.0.0.1:64134 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.830-0400 m30998| 2015-07-09T14:17:36.830-0400 I NETWORK [conn497] end connection 127.0.0.1:64133 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.834-0400 m30999| 2015-07-09T14:17:36.834-0400 I NETWORK [conn497] end connection 127.0.0.1:64131 (8 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.840-0400 m30999| 2015-07-09T14:17:36.840-0400 I NETWORK [conn498] end connection 127.0.0.1:64132 (7 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.897-0400 m30999| 2015-07-09T14:17:36.897-0400 I NETWORK [conn499] end connection 127.0.0.1:64135 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.906-0400 m30999| 2015-07-09T14:17:36.906-0400 I NETWORK [conn500] end connection 127.0.0.1:64136 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.909-0400 m31100| 2015-07-09T14:17:36.908-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:36.902-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:36.946-0400 m30999| 2015-07-09T14:17:36.946-0400 I NETWORK [conn501] end connection 127.0.0.1:64137 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.949-0400 m30998| 2015-07-09T14:17:36.949-0400 I NETWORK [conn502] end connection 127.0.0.1:64144 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.956-0400 m30999| 2015-07-09T14:17:36.954-0400 I NETWORK [conn504] end connection 127.0.0.1:64143 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.957-0400 m30999| 2015-07-09T14:17:36.956-0400 I NETWORK [conn502] end connection 127.0.0.1:64139 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.970-0400 m30998| 2015-07-09T14:17:36.970-0400 I NETWORK [conn499] end connection 127.0.0.1:64138 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.979-0400 m30998| 2015-07-09T14:17:36.979-0400 I NETWORK [conn501] end connection 127.0.0.1:64142 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.995-0400 m30998| 2015-07-09T14:17:36.991-0400 I NETWORK [conn495] end connection 127.0.0.1:64128 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:36.998-0400 m30999| 2015-07-09T14:17:36.996-0400 I NETWORK [conn503] end connection 127.0.0.1:64141 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.009-0400 m30998| 2015-07-09T14:17:37.009-0400 I NETWORK [conn500] end connection 127.0.0.1:64140 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.024-0400 m30998| 2015-07-09T14:17:37.024-0400 I NETWORK [conn503] end connection 127.0.0.1:64145 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.044-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.044-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.044-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.044-0400 jstests/concurrency/fsm_workloads/indexed_insert_upsert.js: Workload completed in 1062 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.045-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.045-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.045-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.045-0400 m30999| 2015-07-09T14:17:37.044-0400 I COMMAND [conn1] DROP: db79.coll79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.045-0400 m30999| 2015-07-09T14:17:37.044-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:37.044-0400-559ebac1ca4787b9985d1f40", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465857044), what: "dropCollection.start", ns: "db79.coll79", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.102-0400 m30999| 2015-07-09T14:17:37.102-0400 I SHARDING [conn1] distributed lock 'db79.coll79/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebac1ca4787b9985d1f41 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.103-0400 m31100| 2015-07-09T14:17:37.103-0400 I COMMAND [conn34] CMD: drop db79.coll79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.107-0400 m31200| 2015-07-09T14:17:37.106-0400 I COMMAND [conn63] CMD: drop db79.coll79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.108-0400 m31101| 2015-07-09T14:17:37.107-0400 I COMMAND [repl writer worker 9] CMD: drop db79.coll79 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.108-0400 m31102| 2015-07-09T14:17:37.108-0400 I COMMAND [repl writer worker 15] CMD: drop db79.coll79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.110-0400 m31201| 2015-07-09T14:17:37.110-0400 I COMMAND [repl writer worker 14] CMD: drop db79.coll79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.111-0400 m31202| 2015-07-09T14:17:37.110-0400 I COMMAND [repl writer worker 11] CMD: drop db79.coll79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.164-0400 m31100| 2015-07-09T14:17:37.163-0400 I SHARDING [conn34] remotely refreshing metadata for db79.coll79 with requested shard version 0|0||000000000000000000000000, current shard version is 1|3||559ebabfca4787b9985d1f3e, current metadata version is 1|3||559ebabfca4787b9985d1f3e [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.165-0400 m31100| 2015-07-09T14:17:37.165-0400 W SHARDING [conn34] no chunks found when reloading db79.coll79, previous version was 0|0||559ebabfca4787b9985d1f3e, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.166-0400 m31100| 2015-07-09T14:17:37.165-0400 I SHARDING [conn34] dropping metadata for db79.coll79 at shard version 1|3||559ebabfca4787b9985d1f3e, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.168-0400 m30999| 2015-07-09T14:17:37.167-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:37.167-0400-559ebac1ca4787b9985d1f42", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465857167), what: "dropCollection", ns: "db79.coll79", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.222-0400 m30999| 2015-07-09T14:17:37.222-0400 I SHARDING [conn1] distributed lock 'db79.coll79/bs-osx108-8:30999:1436464534:16807' unlocked. 
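[editor's note] That completes the collection teardown: mongos takes the db79.coll79 distributed lock, sends the drop to each shard primary (m31100, m31200), the secondaries apply it through their repl writer workers, and the shard then observes "no chunks found when reloading ... this is a drop" and discards its cached shard metadata before the lock is released. The database itself goes next, as the DROP DATABASE lines below show. Done by hand, the whole sequence is two shell calls; a sketch (names from the log, run against a mongos):

  var d = db.getSiblingDB("db79");
  d.coll79.drop();     // fans out to each shard primary, then replicates to secondaries
  d.dropDatabase();    // what the next lines log as "DROP DATABASE: db79"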
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.279-0400 m30999| 2015-07-09T14:17:37.278-0400 I COMMAND [conn1] DROP DATABASE: db79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.279-0400 m30999| 2015-07-09T14:17:37.278-0400 I SHARDING [conn1] DBConfig::dropDatabase: db79 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.279-0400 m30999| 2015-07-09T14:17:37.278-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:37.278-0400-559ebac1ca4787b9985d1f43", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465857278), what: "dropDatabase.start", ns: "db79", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.323-0400 m31200| 2015-07-09T14:17:37.323-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:17:37.316-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.376-0400 m30999| 2015-07-09T14:17:37.376-0400 I SHARDING [conn1] DBConfig::dropDatabase: db79 dropped sharded collections: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.377-0400 m31100| 2015-07-09T14:17:37.376-0400 I COMMAND [conn157] dropDatabase db79 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.377-0400 m31100| 2015-07-09T14:17:37.376-0400 I COMMAND [conn157] dropDatabase db79 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.378-0400 m30999| 2015-07-09T14:17:37.377-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:37.377-0400-559ebac1ca4787b9985d1f44", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465857377), what: "dropDatabase", ns: "db79", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.378-0400 m31101| 2015-07-09T14:17:37.377-0400 I COMMAND [repl writer worker 8] dropDatabase db79 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.378-0400 m31101| 2015-07-09T14:17:37.378-0400 I COMMAND [repl writer worker 8] dropDatabase db79 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.378-0400 m31102| 2015-07-09T14:17:37.377-0400 I COMMAND [repl writer worker 13] dropDatabase db79 starting [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.379-0400 m31102| 2015-07-09T14:17:37.377-0400 I COMMAND [repl writer worker 13] dropDatabase db79 finished [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.471-0400 m31100| 2015-07-09T14:17:37.471-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.474-0400 m31101| 2015-07-09T14:17:37.474-0400 I COMMAND [repl writer worker 2] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.475-0400 m31102| 2015-07-09T14:17:37.474-0400 I COMMAND [repl writer worker 7] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.514-0400 m31200| 2015-07-09T14:17:37.513-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.517-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.517-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.517-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.518-0400 jstests/concurrency/fsm_workloads/server_status.js [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.518-0400 ---- 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.518-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.518-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.518-0400 m31201| 2015-07-09T14:17:37.518-0400 I COMMAND [repl writer worker 11] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.518-0400 m31202| 2015-07-09T14:17:37.518-0400 I COMMAND [repl writer worker 0] CMD: drop test.fsm_teardown [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.525-0400 m30999| 2015-07-09T14:17:37.524-0400 I SHARDING [conn1] distributed lock 'db80/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebac1ca4787b9985d1f45 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.529-0400 m30999| 2015-07-09T14:17:37.528-0400 I SHARDING [conn1] Placing [db80] on: test-rs0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.529-0400 m30999| 2015-07-09T14:17:37.528-0400 I SHARDING [conn1] Enabling sharding for database [db80] in config db [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.583-0400 m30999| 2015-07-09T14:17:37.582-0400 I SHARDING [conn1] distributed lock 'db80/bs-osx108-8:30999:1436464534:16807' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.607-0400 m31100| 2015-07-09T14:17:37.606-0400 I INDEX [conn30] build index on: db80.coll80 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db80.coll80" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.608-0400 m31100| 2015-07-09T14:17:37.606-0400 I INDEX [conn30] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.612-0400 m31100| 2015-07-09T14:17:37.611-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.614-0400 m30999| 2015-07-09T14:17:37.614-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db80.coll80", key: { _id: "hashed" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.617-0400 m30999| 2015-07-09T14:17:37.617-0400 I SHARDING [conn1] distributed lock 'db80.coll80/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebac1ca4787b9985d1f46 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.618-0400 m30999| 2015-07-09T14:17:37.617-0400 I SHARDING [conn1] enable sharding on: db80.coll80 with shard key: { _id: "hashed" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.619-0400 m30999| 2015-07-09T14:17:37.617-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:37.617-0400-559ebac1ca4787b9985d1f47", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465857617), what: "shardCollection.start", ns: "db80.coll80", details: { shardKey: { _id: "hashed" }, collection: "db80.coll80", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.622-0400 m31101| 2015-07-09T14:17:37.621-0400 I INDEX [repl writer worker 0] build index on: db80.coll80 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db80.coll80" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.623-0400 m31101| 2015-07-09T14:17:37.622-0400 I INDEX [repl writer worker 0] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.623-0400 m31102| 2015-07-09T14:17:37.622-0400 I INDEX [repl writer worker 11] build index on: db80.coll80 properties: { v: 1, key: { _id: "hashed" }, 
name: "_id_hashed", ns: "db80.coll80" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.624-0400 m31102| 2015-07-09T14:17:37.622-0400 I INDEX [repl writer worker 11] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.628-0400 m31101| 2015-07-09T14:17:37.628-0400 I INDEX [repl writer worker 0] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.630-0400 m31102| 2015-07-09T14:17:37.629-0400 I INDEX [repl writer worker 11] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.671-0400 m30999| 2015-07-09T14:17:37.671-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db80.coll80 using new epoch 559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.780-0400 m30999| 2015-07-09T14:17:37.779-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db80.coll80: 1ms sequenceNumber: 346 version: 1|1||559ebac1ca4787b9985d1f48 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.835-0400 m30999| 2015-07-09T14:17:37.835-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db80.coll80: 0ms sequenceNumber: 347 version: 1|1||559ebac1ca4787b9985d1f48 based on: (empty) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.837-0400 m31100| 2015-07-09T14:17:37.837-0400 I SHARDING [conn45] remotely refreshing metadata for db80.coll80 with requested shard version 1|1||559ebac1ca4787b9985d1f48, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.839-0400 m31100| 2015-07-09T14:17:37.839-0400 I SHARDING [conn45] collection db80.coll80 was previously unsharded, new metadata loaded with shard version 1|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.839-0400 m31100| 2015-07-09T14:17:37.839-0400 I SHARDING [conn45] collection version was loaded at version 1|1||559ebac1ca4787b9985d1f48, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.840-0400 m30999| 2015-07-09T14:17:37.839-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:37.839-0400-559ebac1ca4787b9985d1f49", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465857839), what: "shardCollection", ns: "db80.coll80", details: { version: "1|1||559ebac1ca4787b9985d1f48" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.894-0400 m30999| 2015-07-09T14:17:37.894-0400 I SHARDING [conn1] distributed lock 'db80.coll80/bs-osx108-8:30999:1436464534:16807' unlocked. 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.895-0400 m30999| 2015-07-09T14:17:37.895-0400 I SHARDING [conn1] moving chunk ns: db80.coll80 moving ( ns: db80.coll80, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.896-0400 m31100| 2015-07-09T14:17:37.895-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.897-0400 m31100| 2015-07-09T14:17:37.896-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebac1ca4787b9985d1f48') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.900-0400 m31100| 2015-07-09T14:17:37.900-0400 I SHARDING [conn34] distributed lock 'db80.coll80/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebac1792e00bb67274b19 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.901-0400 m31100| 2015-07-09T14:17:37.900-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:37.900-0400-559ebac1792e00bb67274b1a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465857900), what: "moveChunk.start", ns: "db80.coll80", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.953-0400 m31100| 2015-07-09T14:17:37.953-0400 I SHARDING [conn34] remotely refreshing metadata for db80.coll80 based on current shard version 1|1||559ebac1ca4787b9985d1f48, current metadata version is 1|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.954-0400 m31100| 2015-07-09T14:17:37.954-0400 I SHARDING [conn34] metadata of collection db80.coll80 already up to date (shard version : 1|1||559ebac1ca4787b9985d1f48, took 0ms) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.954-0400 m31100| 2015-07-09T14:17:37.954-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.955-0400 m31100| 2015-07-09T14:17:37.954-0400 I SHARDING [conn34] moveChunk number of documents: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.955-0400 m31200| 2015-07-09T14:17:37.955-0400 I SHARDING [conn16] remotely refreshing metadata for db80.coll80, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.956-0400 m31200| 2015-07-09T14:17:37.956-0400 I SHARDING [conn16] collection db80.coll80 was previously unsharded, new metadata loaded with shard version 0|0||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.956-0400 m31200| 2015-07-09T14:17:37.956-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebac1ca4787b9985d1f48, took 0ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.956-0400 m31200| 2015-07-09T14:17:37.956-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for 
collection db80.coll80 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.958-0400 m31100| 2015-07-09T14:17:37.957-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.961-0400 m31100| 2015-07-09T14:17:37.960-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.966-0400 m31100| 2015-07-09T14:17:37.966-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.976-0400 m31100| 2015-07-09T14:17:37.975-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.976-0400 m31200| 2015-07-09T14:17:37.975-0400 I INDEX [migrateThread] build index on: db80.coll80 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db80.coll80" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.977-0400 m31200| 2015-07-09T14:17:37.975-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.983-0400 m31200| 2015-07-09T14:17:37.983-0400 I INDEX [migrateThread] build index on: db80.coll80 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db80.coll80" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.983-0400 m31200| 2015-07-09T14:17:37.983-0400 I INDEX [migrateThread] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:37.994-0400 m31100| 2015-07-09T14:17:37.993-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.001-0400 m31200| 2015-07-09T14:17:38.001-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.002-0400 m31200| 2015-07-09T14:17:38.002-0400 I SHARDING [migrateThread] Deleter starting delete for: db80.coll80 from { _id: 0 } -> { _id: MaxKey }, with opId: 105545 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.003-0400 m31200| 2015-07-09T14:17:38.003-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db80.coll80 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.009-0400 m31202| 2015-07-09T14:17:38.009-0400 I INDEX [repl writer worker 2] build index on: db80.coll80 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db80.coll80" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.009-0400 m31202| 2015-07-09T14:17:38.009-0400 I INDEX [repl writer worker 2] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.010-0400 m31201| 2015-07-09T14:17:38.010-0400 I INDEX [repl writer worker 8] build index on: db80.coll80 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db80.coll80" } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.011-0400 m31201| 2015-07-09T14:17:38.010-0400 I INDEX [repl writer worker 8] building index using bulk method [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.014-0400 m31201| 2015-07-09T14:17:38.013-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.016-0400 m31200| 2015-07-09T14:17:38.015-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.016-0400 m31200| 2015-07-09T14:17:38.016-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db80.coll80' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.018-0400 m31202| 2015-07-09T14:17:38.018-0400 I INDEX [repl writer worker 2] build index done. scanned 0 total records. 
0 secs [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.027-0400 m31100| 2015-07-09T14:17:38.027-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.027-0400 m31100| 2015-07-09T14:17:38.027-0400 I SHARDING [conn34] About to check if it is safe to enter critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.028-0400 m31100| 2015-07-09T14:17:38.027-0400 I SHARDING [conn34] About to enter migrate critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.028-0400 m31100| 2015-07-09T14:17:38.027-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.040-0400 m31200| 2015-07-09T14:17:38.039-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db80.coll80' { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.040-0400 m31200| 2015-07-09T14:17:38.040-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.039-0400-559ebac2d5a107a5b9c0db85", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465858040), what: "moveChunk.to", ns: "db80.coll80", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 46, step 2 of 5: 12, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.096-0400 m31100| 2015-07-09T14:17:38.094-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.096-0400 m31100| 2015-07-09T14:17:38.095-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559ebac1ca4787b9985d1f48 through { _id: MinKey } -> { _id: 0 } for collection 'db80.coll80' [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.096-0400 m31100| 2015-07-09T14:17:38.096-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.096-0400-559ebac2792e00bb67274b1b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465858096), what: "moveChunk.commit", ns: "db80.coll80", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.149-0400 m31100| 2015-07-09T14:17:38.148-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.149-0400 m31100| 2015-07-09T14:17:38.148-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.149-0400 m31100| 2015-07-09T14:17:38.148-0400 I SHARDING [conn34] Deleter starting delete for: db80.coll80 from { _id: 0 } -> { _id: MaxKey }, with opId: 245479 [js_test:fsm_all_sharded_replication] 
2015-07-09T14:17:38.149-0400 m31100| 2015-07-09T14:17:38.148-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db80.coll80 from { _id: 0 } -> { _id: MaxKey } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.149-0400 m31100| 2015-07-09T14:17:38.148-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.150-0400 m31100| 2015-07-09T14:17:38.149-0400 I SHARDING [conn34] distributed lock 'db80.coll80/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.150-0400 m31100| 2015-07-09T14:17:38.149-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.149-0400-559ebac2792e00bb67274b1c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465858149), what: "moveChunk.from", ns: "db80.coll80", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 1, step 4 of 6: 70, step 5 of 6: 121, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.204-0400 m31100| 2015-07-09T14:17:38.203-0400 I COMMAND [conn34] command db80.coll80 command: moveChunk { moveChunk: "db80.coll80", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebac1ca4787b9985d1f48') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 307ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.206-0400 m30999| 2015-07-09T14:17:38.206-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db80.coll80: 0ms sequenceNumber: 348 version: 2|1||559ebac1ca4787b9985d1f48 based on: 1|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.208-0400 m31100| 2015-07-09T14:17:38.207-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db80.coll80", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebac1ca4787b9985d1f48') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.212-0400 m31100| 2015-07-09T14:17:38.211-0400 I SHARDING [conn34] distributed lock 'db80.coll80/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebac2792e00bb67274b1d [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.212-0400 m31100| 2015-07-09T14:17:38.211-0400 I SHARDING [conn34] remotely refreshing metadata for db80.coll80 based on current shard version 2|0||559ebac1ca4787b9985d1f48, current metadata version is 2|0||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.213-0400 m31100| 2015-07-09T14:17:38.213-0400 I SHARDING [conn34] updating metadata for db80.coll80 from shard version 2|0||559ebac1ca4787b9985d1f48 to shard version 2|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.213-0400 m31100| 2015-07-09T14:17:38.213-0400 I 
SHARDING [conn34] collection version was loaded at version 2|1||559ebac1ca4787b9985d1f48, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.214-0400 m31100| 2015-07-09T14:17:38.213-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.216-0400 m31100| 2015-07-09T14:17:38.215-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.215-0400-559ebac2792e00bb67274b1e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465858215), what: "split", ns: "db80.coll80", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebac1ca4787b9985d1f48') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebac1ca4787b9985d1f48') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.271-0400 m31100| 2015-07-09T14:17:38.270-0400 I SHARDING [conn34] distributed lock 'db80.coll80/bs-osx108-8:31100:1436464536:197041335' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.273-0400 m30999| 2015-07-09T14:17:38.273-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db80.coll80: 1ms sequenceNumber: 349 version: 2|3||559ebac1ca4787b9985d1f48 based on: 2|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.274-0400 m31200| 2015-07-09T14:17:38.273-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db80.coll80", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebac1ca4787b9985d1f48') } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.277-0400 m31200| 2015-07-09T14:17:38.277-0400 I SHARDING [conn63] distributed lock 'db80.coll80/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebac2d5a107a5b9c0db86 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.278-0400 m31200| 2015-07-09T14:17:38.277-0400 I SHARDING [conn63] remotely refreshing metadata for db80.coll80 based on current shard version 0|0||559ebac1ca4787b9985d1f48, current metadata version is 1|1||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.279-0400 m31200| 2015-07-09T14:17:38.279-0400 I SHARDING [conn63] updating metadata for db80.coll80 from shard version 0|0||559ebac1ca4787b9985d1f48 to shard version 2|0||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.279-0400 m31200| 2015-07-09T14:17:38.279-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559ebac1ca4787b9985d1f48, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.279-0400 m31200| 2015-07-09T14:17:38.279-0400 I SHARDING [conn63] splitChunk accepted at version 2|0||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.281-0400 m31200| 2015-07-09T14:17:38.280-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.280-0400-559ebac2d5a107a5b9c0db87", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436465858280), what: "split", ns: "db80.coll80", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: 
ObjectId('559ebac1ca4787b9985d1f48') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebac1ca4787b9985d1f48') } } } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.335-0400 m31200| 2015-07-09T14:17:38.335-0400 I SHARDING [conn63] distributed lock 'db80.coll80/bs-osx108-8:31200:1436464537:809424560' unlocked. [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.337-0400 m30999| 2015-07-09T14:17:38.337-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db80.coll80: 0ms sequenceNumber: 350 version: 2|5||559ebac1ca4787b9985d1f48 based on: 2|3||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.338-0400 Using 10 threads (requested 10) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.450-0400 m30998| 2015-07-09T14:17:38.450-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64147 #504 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.450-0400 m30999| 2015-07-09T14:17:38.450-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64148 #505 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.451-0400 m30998| 2015-07-09T14:17:38.450-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64150 #505 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.451-0400 m30999| 2015-07-09T14:17:38.450-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64149 #506 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.451-0400 m30998| 2015-07-09T14:17:38.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64152 #506 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.451-0400 m30998| 2015-07-09T14:17:38.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64154 #507 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.452-0400 m30999| 2015-07-09T14:17:38.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64151 #507 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.452-0400 m30999| 2015-07-09T14:17:38.451-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64153 #508 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.452-0400 m30998| 2015-07-09T14:17:38.452-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64155 #508 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.465-0400 m30999| 2015-07-09T14:17:38.465-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64156 #509 (6 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.470-0400 setting random seed: 3298359261825 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.470-0400 setting random seed: 9473924762569 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.470-0400 setting random seed: 4209524239413 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.471-0400 setting random seed: 8253558645956 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.471-0400 setting random seed: 5524139315821 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.474-0400 setting random seed: 1785135422833 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.477-0400 setting random seed: 4738240437582 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.483-0400 setting random seed: 
2771065528504 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.484-0400 setting random seed: 5607071523554 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.495-0400 setting random seed: 7560227178037 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.556-0400 m30999| 2015-07-09T14:17:38.556-0400 I NETWORK [conn505] end connection 127.0.0.1:64148 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.628-0400 m30998| 2015-07-09T14:17:38.628-0400 I NETWORK [conn508] end connection 127.0.0.1:64155 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.634-0400 m30998| 2015-07-09T14:17:38.632-0400 I NETWORK [conn505] end connection 127.0.0.1:64150 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.635-0400 m30998| 2015-07-09T14:17:38.635-0400 I NETWORK [conn507] end connection 127.0.0.1:64154 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.636-0400 m30998| 2015-07-09T14:17:38.636-0400 I NETWORK [conn504] end connection 127.0.0.1:64147 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.649-0400 m30999| 2015-07-09T14:17:38.648-0400 I NETWORK [conn508] end connection 127.0.0.1:64153 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.669-0400 m30999| 2015-07-09T14:17:38.668-0400 I NETWORK [conn507] end connection 127.0.0.1:64151 (3 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.689-0400 m30999| 2015-07-09T14:17:38.688-0400 I NETWORK [conn506] end connection 127.0.0.1:64149 (2 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.696-0400 m30999| 2015-07-09T14:17:38.696-0400 I NETWORK [conn509] end connection 127.0.0.1:64156 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.706-0400 m30998| 2015-07-09T14:17:38.703-0400 I NETWORK [conn506] end connection 127.0.0.1:64152 (1 connection now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.722-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.723-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.723-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.723-0400 jstests/concurrency/fsm_workloads/server_status.js: Workload completed in 384 ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.723-0400 ---- [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.724-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.724-0400 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.724-0400 m30999| 2015-07-09T14:17:38.722-0400 I COMMAND [conn1] DROP: db80.coll80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.725-0400 m30999| 2015-07-09T14:17:38.723-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.723-0400-559ebac2ca4787b9985d1f4a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465858723), what: "dropCollection.start", ns: "db80.coll80", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.778-0400 m30999| 2015-07-09T14:17:38.778-0400 I SHARDING [conn1] distributed lock 'db80.coll80/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebac2ca4787b9985d1f4b [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.779-0400 m31100| 2015-07-09T14:17:38.779-0400 I COMMAND [conn34] CMD: drop db80.coll80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.784-0400 m31200| 
2015-07-09T14:17:38.783-0400 I COMMAND [conn63] CMD: drop db80.coll80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.786-0400 m31101| 2015-07-09T14:17:38.786-0400 I COMMAND [repl writer worker 11] CMD: drop db80.coll80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.786-0400 m31102| 2015-07-09T14:17:38.786-0400 I COMMAND [repl writer worker 3] CMD: drop db80.coll80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.787-0400 m31202| 2015-07-09T14:17:38.787-0400 I COMMAND [repl writer worker 1] CMD: drop db80.coll80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.788-0400 m31201| 2015-07-09T14:17:38.788-0400 I COMMAND [repl writer worker 12] CMD: drop db80.coll80 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.839-0400 m31100| 2015-07-09T14:17:38.838-0400 I SHARDING [conn34] remotely refreshing metadata for db80.coll80 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebac1ca4787b9985d1f48, current metadata version is 2|3||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.841-0400 m31100| 2015-07-09T14:17:38.840-0400 W SHARDING [conn34] no chunks found when reloading db80.coll80, previous version was 0|0||559ebac1ca4787b9985d1f48, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.841-0400 m31100| 2015-07-09T14:17:38.840-0400 I SHARDING [conn34] dropping metadata for db80.coll80 at shard version 2|3||559ebac1ca4787b9985d1f48, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.842-0400 m31200| 2015-07-09T14:17:38.842-0400 I SHARDING [conn63] remotely refreshing metadata for db80.coll80 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebac1ca4787b9985d1f48, current metadata version is 2|5||559ebac1ca4787b9985d1f48 [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.844-0400 m31200| 2015-07-09T14:17:38.843-0400 W SHARDING [conn63] no chunks found when reloading db80.coll80, previous version was 0|0||559ebac1ca4787b9985d1f48, this is a drop [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.844-0400 m31200| 2015-07-09T14:17:38.843-0400 I SHARDING [conn63] dropping metadata for db80.coll80 at shard version 2|5||559ebac1ca4787b9985d1f48, took 1ms [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.845-0400 m30999| 2015-07-09T14:17:38.844-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.844-0400-559ebac2ca4787b9985d1f4c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465858844), what: "dropCollection", ns: "db80.coll80", details: {} } [js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.899-0400 m30999| 2015-07-09T14:17:38.899-0400 I SHARDING [conn1] distributed lock 'db80.coll80/bs-osx108-8:30999:1436464534:16807' unlocked. 
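[editor's note] Looking back over the db80 block, it is a complete chunk-migration round trip in miniature: the donor (conn34 on test-rs0) accepts the moveChunk, the recipient's migrateThread on test-rs1 builds the indexes and reports state "ready" then "steady" with zero documents cloned, the donor enters the critical section, commits version 2|0 to the config servers, and, because waitForDelete: true, the rangeDeleter removes the (here empty) source range inline before the 307ms command summary is logged. Driven by hand it is one shell call; a sketch (names from the log, and it assumes the collection had not just been dropped as it is here):

  // Request the same migration the test drove above.
  sh.moveChunk("db80.coll80", { _id: 0 }, "test-rs1");
  // Donor-side progress then appears exactly as logged: ready -> steady,
  // critical section, config commit, range delete.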
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.958-0400 m30999| 2015-07-09T14:17:38.958-0400 I COMMAND [conn1] DROP DATABASE: db80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.958-0400 m30999| 2015-07-09T14:17:38.958-0400 I SHARDING [conn1] DBConfig::dropDatabase: db80
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:38.958-0400 m30999| 2015-07-09T14:17:38.958-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:38.958-0400-559ebac2ca4787b9985d1f4d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465858958), what: "dropDatabase.start", ns: "db80", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.064-0400 m30999| 2015-07-09T14:17:39.063-0400 I SHARDING [conn1] DBConfig::dropDatabase: db80 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.282-0400 m31100| 2015-07-09T14:17:39.064-0400 I COMMAND [conn157] dropDatabase db80 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.283-0400 m31100| 2015-07-09T14:17:39.064-0400 I COMMAND [conn157] dropDatabase db80 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.283-0400 m30999| 2015-07-09T14:17:39.065-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.065-0400-559ebac3ca4787b9985d1f4e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465859065), what: "dropDatabase", ns: "db80", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.283-0400 m31102| 2015-07-09T14:17:39.066-0400 I COMMAND [repl writer worker 10] dropDatabase db80 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.283-0400 m31102| 2015-07-09T14:17:39.066-0400 I COMMAND [repl writer worker 10] dropDatabase db80 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400 m31101| 2015-07-09T14:17:39.066-0400 I COMMAND [repl writer worker 3] dropDatabase db80 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400 m31101| 2015-07-09T14:17:39.066-0400 I COMMAND [repl writer worker 3] dropDatabase db80 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400 m31100| 2015-07-09T14:17:39.153-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400 m31102| 2015-07-09T14:17:39.156-0400 I COMMAND [repl writer worker 9] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400 m31101| 2015-07-09T14:17:39.156-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400 m31200| 2015-07-09T14:17:39.193-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.284-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.285-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.285-0400 jstests/concurrency/fsm_workloads/indexed_insert_ttl.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.285-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.285-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.285-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.285-0400 m31201| 2015-07-09T14:17:39.196-0400 I COMMAND [repl writer worker 5] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.286-0400 m31202| 2015-07-09T14:17:39.197-0400 I COMMAND [repl writer worker 14] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.286-0400 m30999| 2015-07-09T14:17:39.203-0400 I SHARDING [conn1] distributed lock 'db81/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebac3ca4787b9985d1f4f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.286-0400 m30999| 2015-07-09T14:17:39.207-0400 I SHARDING [conn1] Placing [db81] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.286-0400 m30999| 2015-07-09T14:17:39.207-0400 I SHARDING [conn1] Enabling sharding for database [db81] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.287-0400 m30999| 2015-07-09T14:17:39.263-0400 I SHARDING [conn1] distributed lock 'db81/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.290-0400 m31100| 2015-07-09T14:17:39.290-0400 I INDEX [conn30] build index on: db81.coll81 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db81.coll81" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.291-0400 m31100| 2015-07-09T14:17:39.290-0400 I INDEX [conn30] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.300-0400 m31100| 2015-07-09T14:17:39.300-0400 I INDEX [conn30] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.301-0400 m30999| 2015-07-09T14:17:39.301-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db81.coll81", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.307-0400 m30999| 2015-07-09T14:17:39.307-0400 I SHARDING [conn1] distributed lock 'db81.coll81/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebac3ca4787b9985d1f50
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.309-0400 m30999| 2015-07-09T14:17:39.309-0400 I SHARDING [conn1] enable sharding on: db81.coll81 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.310-0400 m30999| 2015-07-09T14:17:39.309-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.309-0400-559ebac3ca4787b9985d1f51", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465859309), what: "shardCollection.start", ns: "db81.coll81", details: { shardKey: { _id: "hashed" }, collection: "db81.coll81", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.310-0400 m31102| 2015-07-09T14:17:39.307-0400 I INDEX [repl writer worker 5] build index on: db81.coll81 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db81.coll81" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.310-0400 m31102| 2015-07-09T14:17:39.308-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.311-0400 m31101| 2015-07-09T14:17:39.309-0400 I INDEX [repl writer worker 5] build index on: db81.coll81 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db81.coll81" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.311-0400 m31101| 2015-07-09T14:17:39.310-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.312-0400 m31102| 2015-07-09T14:17:39.311-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.318-0400 m31101| 2015-07-09T14:17:39.318-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.362-0400 m30999| 2015-07-09T14:17:39.362-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db81.coll81 using new epoch 559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.469-0400 m30999| 2015-07-09T14:17:39.468-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db81.coll81: 0ms sequenceNumber: 351 version: 1|1||559ebac3ca4787b9985d1f52 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.524-0400 m30999| 2015-07-09T14:17:39.523-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db81.coll81: 0ms sequenceNumber: 352 version: 1|1||559ebac3ca4787b9985d1f52 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.526-0400 m31100| 2015-07-09T14:17:39.525-0400 I SHARDING [conn45] remotely refreshing metadata for db81.coll81 with requested shard version 1|1||559ebac3ca4787b9985d1f52, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.527-0400 m31100| 2015-07-09T14:17:39.527-0400 I SHARDING [conn45] collection db81.coll81 was previously unsharded, new metadata loaded with shard version 1|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.527-0400 m31100| 2015-07-09T14:17:39.527-0400 I SHARDING [conn45] collection version was loaded at version 1|1||559ebac3ca4787b9985d1f52, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.528-0400 m30999| 2015-07-09T14:17:39.527-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.527-0400-559ebac3ca4787b9985d1f53", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465859527), what: "shardCollection", ns: "db81.coll81", details: { version: "1|1||559ebac3ca4787b9985d1f52" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.581-0400 m30999| 2015-07-09T14:17:39.581-0400 I SHARDING [conn1] distributed lock 'db81.coll81/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.582-0400 m30999| 2015-07-09T14:17:39.582-0400 I SHARDING [conn1] moving chunk ns: db81.coll81 moving ( ns: db81.coll81, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.583-0400 m31100| 2015-07-09T14:17:39.582-0400 I SHARDING [conn34] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.584-0400 m31100| 2015-07-09T14:17:39.583-0400 I SHARDING [conn34] received moveChunk request: { moveChunk: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebac3ca4787b9985d1f52') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.587-0400 m31100| 2015-07-09T14:17:39.587-0400 I SHARDING [conn34] distributed lock 'db81.coll81/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebac3792e00bb67274b20
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.587-0400 m31100| 2015-07-09T14:17:39.587-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.587-0400-559ebac3792e00bb67274b21", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465859587), what: "moveChunk.start", ns: "db81.coll81", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.640-0400 m31100| 2015-07-09T14:17:39.639-0400 I SHARDING [conn34] remotely refreshing metadata for db81.coll81 based on current shard version 1|1||559ebac3ca4787b9985d1f52, current metadata version is 1|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.642-0400 m31100| 2015-07-09T14:17:39.641-0400 I SHARDING [conn34] metadata of collection db81.coll81 already up to date (shard version : 1|1||559ebac3ca4787b9985d1f52, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.642-0400 m31100| 2015-07-09T14:17:39.641-0400 I SHARDING [conn34] moveChunk request accepted at version 1|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.642-0400 m31100| 2015-07-09T14:17:39.642-0400 I SHARDING [conn34] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.642-0400 m31200| 2015-07-09T14:17:39.642-0400 I SHARDING [conn16] remotely refreshing metadata for db81.coll81, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.644-0400 m31200| 2015-07-09T14:17:39.644-0400 I SHARDING [conn16] collection db81.coll81 was previously unsharded, new metadata loaded with shard version 0|0||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.645-0400 m31200| 2015-07-09T14:17:39.644-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebac3ca4787b9985d1f52, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.645-0400 m31200| 2015-07-09T14:17:39.644-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db81.coll81 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.648-0400 m31100| 2015-07-09T14:17:39.647-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.651-0400 m31100| 2015-07-09T14:17:39.650-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.656-0400 m31100| 2015-07-09T14:17:39.655-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.665-0400 m31100| 2015-07-09T14:17:39.665-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.668-0400 m31200| 2015-07-09T14:17:39.668-0400 I INDEX [migrateThread] build index on: db81.coll81 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db81.coll81" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.668-0400 m31200| 2015-07-09T14:17:39.668-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.674-0400 m31200| 2015-07-09T14:17:39.674-0400 I INDEX [migrateThread] build index on: db81.coll81 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db81.coll81" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.674-0400 m31200| 2015-07-09T14:17:39.674-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.683-0400 m31100| 2015-07-09T14:17:39.682-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.685-0400 m31200| 2015-07-09T14:17:39.684-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.685-0400 m31200| 2015-07-09T14:17:39.685-0400 I SHARDING [migrateThread] Deleter starting delete for: db81.coll81 from { _id: 0 } -> { _id: MaxKey }, with opId: 105590
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.686-0400 m31200| 2015-07-09T14:17:39.685-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db81.coll81 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.692-0400 m31202| 2015-07-09T14:17:39.691-0400 I INDEX [repl writer worker 8] build index on: db81.coll81 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db81.coll81" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.692-0400 m31202| 2015-07-09T14:17:39.691-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.695-0400 m31201| 2015-07-09T14:17:39.695-0400 I INDEX [repl writer worker 15] build index on: db81.coll81 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db81.coll81" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.696-0400 m31201| 2015-07-09T14:17:39.695-0400 I INDEX [repl writer worker 15] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.701-0400 m31202| 2015-07-09T14:17:39.701-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.702-0400 m31200| 2015-07-09T14:17:39.702-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.703-0400 m31200| 2015-07-09T14:17:39.702-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db81.coll81' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.704-0400 m31201| 2015-07-09T14:17:39.703-0400 I INDEX [repl writer worker 15] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.716-0400 m31100| 2015-07-09T14:17:39.715-0400 I SHARDING [conn34] moveChunk data transfer progress: { active: true, ns: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.716-0400 m31100| 2015-07-09T14:17:39.715-0400 I SHARDING [conn34] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.717-0400 m31100| 2015-07-09T14:17:39.716-0400 I SHARDING [conn34] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.717-0400 m31100| 2015-07-09T14:17:39.716-0400 I SHARDING [conn34] moveChunk setting version to: 2|0||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.726-0400 m31200| 2015-07-09T14:17:39.725-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db81.coll81' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.726-0400 m31200| 2015-07-09T14:17:39.726-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.726-0400-559ebac3d5a107a5b9c0db88", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465859726), what: "moveChunk.to", ns: "db81.coll81", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 40, step 2 of 5: 16, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 23, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.781-0400 m31100| 2015-07-09T14:17:39.780-0400 I SHARDING [conn34] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.781-0400 m31100| 2015-07-09T14:17:39.780-0400 I SHARDING [conn34] moveChunk updating self version to: 2|1||559ebac3ca4787b9985d1f52 through { _id: MinKey } -> { _id: 0 } for collection 'db81.coll81'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.783-0400 m31100| 2015-07-09T14:17:39.782-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.782-0400-559ebac3792e00bb67274b22", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465859782), what: "moveChunk.commit", ns: "db81.coll81", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.835-0400 m31100| 2015-07-09T14:17:39.835-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.835-0400 m31100| 2015-07-09T14:17:39.835-0400 I SHARDING [conn34] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.836-0400 m31100| 2015-07-09T14:17:39.835-0400 I SHARDING [conn34] Deleter starting delete for: db81.coll81 from { _id: 0 } -> { _id: MaxKey }, with opId: 245535
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.836-0400 m31100| 2015-07-09T14:17:39.835-0400 I SHARDING [conn34] rangeDeleter deleted 0 documents for db81.coll81 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.836-0400 m31100| 2015-07-09T14:17:39.835-0400 I SHARDING [conn34] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.836-0400 m31100| 2015-07-09T14:17:39.836-0400 I SHARDING [conn34] distributed lock 'db81.coll81/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.837-0400 m31100| 2015-07-09T14:17:39.836-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.836-0400-559ebac3792e00bb67274b23", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465859836), what: "moveChunk.from", ns: "db81.coll81", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 57, step 3 of 6: 3, step 4 of 6: 70, step 5 of 6: 119, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.892-0400 m31100| 2015-07-09T14:17:39.891-0400 I COMMAND [conn34] command db81.coll81 command: moveChunk { moveChunk: "db81.coll81", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebac3ca4787b9985d1f52') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 308ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.894-0400 m30999| 2015-07-09T14:17:39.894-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db81.coll81: 0ms sequenceNumber: 353 version: 2|1||559ebac3ca4787b9985d1f52 based on: 1|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.895-0400 m31100| 2015-07-09T14:17:39.895-0400 I SHARDING [conn34] received splitChunk request: { splitChunk: "db81.coll81", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebac3ca4787b9985d1f52') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.903-0400 m31100| 2015-07-09T14:17:39.903-0400 I SHARDING [conn34] distributed lock 'db81.coll81/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebac3792e00bb67274b24
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.904-0400 m31100| 2015-07-09T14:17:39.903-0400 I SHARDING [conn34] remotely refreshing metadata for db81.coll81 based on current shard version 2|0||559ebac3ca4787b9985d1f52, current metadata version is 2|0||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.905-0400 m31100| 2015-07-09T14:17:39.904-0400 I SHARDING [conn34] updating metadata for db81.coll81 from shard version 2|0||559ebac3ca4787b9985d1f52 to shard version 2|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.905-0400 m31100| 2015-07-09T14:17:39.904-0400 I SHARDING [conn34] collection version was loaded at version 2|1||559ebac3ca4787b9985d1f52, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.905-0400 m31100| 2015-07-09T14:17:39.904-0400 I SHARDING [conn34] splitChunk accepted at version 2|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.907-0400 m31100| 2015-07-09T14:17:39.906-0400 I SHARDING [conn34] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.906-0400-559ebac3792e00bb67274b25", server: "bs-osx108-8", clientAddr: "127.0.0.1:62636", time: new Date(1436465859906), what: "split", ns: "db81.coll81", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebac3ca4787b9985d1f52') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebac3ca4787b9985d1f52') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.960-0400 m31100| 2015-07-09T14:17:39.960-0400 I SHARDING [conn34] distributed lock 'db81.coll81/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.962-0400 m30999| 2015-07-09T14:17:39.962-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db81.coll81: 0ms sequenceNumber: 354 version: 2|3||559ebac3ca4787b9985d1f52 based on: 2|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.963-0400 m31200| 2015-07-09T14:17:39.963-0400 I SHARDING [conn63] received splitChunk request: { splitChunk: "db81.coll81", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebac3ca4787b9985d1f52') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.967-0400 m31200| 2015-07-09T14:17:39.966-0400 I SHARDING [conn63] distributed lock 'db81.coll81/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebac3d5a107a5b9c0db89
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.967-0400 m31200| 2015-07-09T14:17:39.966-0400 I SHARDING [conn63] remotely refreshing metadata for db81.coll81 based on current shard version 0|0||559ebac3ca4787b9985d1f52, current metadata version is 1|1||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.968-0400 m31200| 2015-07-09T14:17:39.968-0400 I SHARDING [conn63] updating metadata for db81.coll81 from shard version 0|0||559ebac3ca4787b9985d1f52 to shard version 2|0||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.969-0400 m31200| 2015-07-09T14:17:39.968-0400 I SHARDING [conn63] collection version was loaded at version 2|3||559ebac3ca4787b9985d1f52, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.969-0400 m31200| 2015-07-09T14:17:39.968-0400 I SHARDING [conn63] splitChunk accepted at version 2|0||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:39.971-0400 m31200| 2015-07-09T14:17:39.970-0400 I SHARDING [conn63] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:17:39.970-0400-559ebac3d5a107a5b9c0db8a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62862", time: new Date(1436465859970), what: "split", ns: "db81.coll81", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559ebac3ca4787b9985d1f52') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebac3ca4787b9985d1f52') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.024-0400 m31200| 2015-07-09T14:17:40.023-0400 I SHARDING [conn63] distributed lock 'db81.coll81/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.026-0400 m30999| 2015-07-09T14:17:40.025-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db81.coll81: 0ms sequenceNumber: 355 version: 2|5||559ebac3ca4787b9985d1f52 based on: 2|3||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.034-0400 m31100| 2015-07-09T14:17:40.034-0400 I INDEX [conn45] build index on: db81.coll81 properties: { v: 1, key: { indexed_insert_ttl: 1.0 }, name: "indexed_insert_ttl_1", ns: "db81.coll81", expireAfterSeconds: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.034-0400 m31100| 2015-07-09T14:17:40.034-0400 I INDEX [conn45] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.035-0400 m31200| 2015-07-09T14:17:40.034-0400 I INDEX [conn39] build index on: db81.coll81 properties: { v: 1, key: { indexed_insert_ttl: 1.0 }, name: "indexed_insert_ttl_1", ns: "db81.coll81", expireAfterSeconds: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.035-0400 m31200| 2015-07-09T14:17:40.034-0400 I INDEX [conn39] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.038-0400 m31100| 2015-07-09T14:17:40.037-0400 I INDEX [conn45] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.043-0400 m31200| 2015-07-09T14:17:40.043-0400 I INDEX [conn39] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.044-0400 Using 20 threads (requested 20)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.130-0400 m31102| 2015-07-09T14:17:40.119-0400 I INDEX [repl writer worker 1] build index on: db81.coll81 properties: { v: 1, key: { indexed_insert_ttl: 1.0 }, name: "indexed_insert_ttl_1", ns: "db81.coll81", expireAfterSeconds: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.130-0400 m31102| 2015-07-09T14:17:40.119-0400 I INDEX [repl writer worker 1] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.133-0400 m31101| 2015-07-09T14:17:40.123-0400 I INDEX [repl writer worker 10] build index on: db81.coll81 properties: { v: 1, key: { indexed_insert_ttl: 1.0 }, name: "indexed_insert_ttl_1", ns: "db81.coll81", expireAfterSeconds: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.133-0400 m31101| 2015-07-09T14:17:40.123-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.142-0400 m31201| 2015-07-09T14:17:40.133-0400 I INDEX [repl writer worker 9] build index on: db81.coll81 properties: { v: 1, key: { indexed_insert_ttl: 1.0 }, name: "indexed_insert_ttl_1", ns: "db81.coll81", expireAfterSeconds: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.142-0400 m31201| 2015-07-09T14:17:40.133-0400 I INDEX [repl writer worker 9] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.145-0400 m31202| 2015-07-09T14:17:40.141-0400 I INDEX [repl writer worker 5] build index on: db81.coll81 properties: { v: 1, key: { indexed_insert_ttl: 1.0 }, name: "indexed_insert_ttl_1", ns: "db81.coll81", expireAfterSeconds: 5.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.145-0400 m31202| 2015-07-09T14:17:40.141-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.161-0400 m31201| 2015-07-09T14:17:40.158-0400 I INDEX [repl writer worker 9] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.165-0400 m31102| 2015-07-09T14:17:40.164-0400 I INDEX [repl writer worker 1] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.182-0400 m31101| 2015-07-09T14:17:40.177-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.184-0400 m31202| 2015-07-09T14:17:40.184-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.282-0400 m30998| 2015-07-09T14:17:40.281-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64158 #509 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.334-0400 m30999| 2015-07-09T14:17:40.333-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64159 #510 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.334-0400 m30998| 2015-07-09T14:17:40.334-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64160 #510 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.335-0400 m30999| 2015-07-09T14:17:40.334-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64161 #511 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.336-0400 m30999| 2015-07-09T14:17:40.335-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64164 #512 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.336-0400 m30998| 2015-07-09T14:17:40.335-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64162 #511 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.336-0400 m30999| 2015-07-09T14:17:40.335-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64165 #513 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.336-0400 m30998| 2015-07-09T14:17:40.336-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64163 #512 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.337-0400 m30999| 2015-07-09T14:17:40.336-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64167 #514 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.340-0400 m30998| 2015-07-09T14:17:40.340-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64166 #513 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.340-0400 m30999| 2015-07-09T14:17:40.340-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64168 #515 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.340-0400 m30998| 2015-07-09T14:17:40.340-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64171 #514 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.341-0400 m30999| 2015-07-09T14:17:40.341-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64169 #516 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.341-0400 m30998| 2015-07-09T14:17:40.341-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64172 #515 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.341-0400 m30999| 2015-07-09T14:17:40.341-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64170 #517 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.347-0400 m30998| 2015-07-09T14:17:40.344-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64174 #516 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.347-0400 m30999| 2015-07-09T14:17:40.344-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64173 #518 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.348-0400 m30998| 2015-07-09T14:17:40.348-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64175 #517 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.354-0400 m30999| 2015-07-09T14:17:40.352-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64176 #519 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.354-0400 m30998| 2015-07-09T14:17:40.352-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64177 #518 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.359-0400 setting random seed: 5514443316496
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.359-0400 setting random seed: 1486533582210
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.360-0400 setting random seed: 5052322233095
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.360-0400 setting random seed: 6887425128370
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.361-0400 setting random seed: 4513317118398
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.363-0400 setting random seed: 4379811179824
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.367-0400 setting random seed: 2536639333702
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.368-0400 setting random seed: 9011277239769
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.368-0400 setting random seed: 3977319812402
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.369-0400 m30998| 2015-07-09T14:17:40.368-0400 I SHARDING [conn510] ChunkManager: time to load chunks for db81.coll81: 0ms sequenceNumber: 99 version: 2|5||559ebac3ca4787b9985d1f52 based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.370-0400 setting random seed: 1248863521032
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.370-0400 setting random seed: 4169927844777
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.370-0400 setting random seed: 8051140261813
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.370-0400 setting random seed: 4884092574939
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.374-0400 setting random seed: 4234813665971
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.395-0400 setting random seed: 6943945223465
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.398-0400 setting random seed: 7429348025470
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.404-0400 setting random seed: 5320844249799
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.406-0400 setting random seed: 6954186423681
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.415-0400 setting random seed: 9649207754991
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:40.423-0400 setting random seed: 6814118167385
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.110-0400 m30999| 2015-07-09T14:17:42.109-0400 I NETWORK [conn512] end connection 127.0.0.1:64164 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.152-0400 m30998| 2015-07-09T14:17:42.152-0400 I NETWORK [conn513] end connection 127.0.0.1:64166 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.163-0400 m30998| 2015-07-09T14:17:42.160-0400 I NETWORK [conn510] end connection 127.0.0.1:64160 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.182-0400 m30999| 2015-07-09T14:17:42.182-0400 I NETWORK [conn511] end connection 127.0.0.1:64161 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.203-0400 m30998| 2015-07-09T14:17:42.202-0400 I NETWORK [conn511] end connection 127.0.0.1:64162 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.213-0400 m30998| 2015-07-09T14:17:42.213-0400 I NETWORK [conn509] end connection 127.0.0.1:64158 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.236-0400 m30998| 2015-07-09T14:17:42.236-0400 I NETWORK [conn516] end connection 127.0.0.1:64174 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.241-0400 m30998| 2015-07-09T14:17:42.241-0400 I NETWORK [conn514] end connection 127.0.0.1:64171 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.250-0400 m30998| 2015-07-09T14:17:42.249-0400 I NETWORK [conn517] end connection 127.0.0.1:64175 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.254-0400 m30998| 2015-07-09T14:17:42.254-0400 I NETWORK [conn512] end connection 127.0.0.1:64163 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.266-0400 m30999| 2015-07-09T14:17:42.266-0400 I NETWORK [conn510] end connection 127.0.0.1:64159 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.279-0400 m30999| 2015-07-09T14:17:42.279-0400 I NETWORK [conn518] end connection 127.0.0.1:64173 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.280-0400 m30999| 2015-07-09T14:17:42.280-0400 I NETWORK [conn514] end connection 127.0.0.1:64167 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.280-0400 m30999| 2015-07-09T14:17:42.280-0400 I NETWORK [conn513] end connection 127.0.0.1:64165 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.299-0400 m30999| 2015-07-09T14:17:42.298-0400 I NETWORK [conn519] end connection 127.0.0.1:64176 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.362-0400 m30998| 2015-07-09T14:17:42.361-0400 I NETWORK [conn515] end connection 127.0.0.1:64172 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.379-0400 m30999| 2015-07-09T14:17:42.378-0400 I NETWORK [conn517] end connection 127.0.0.1:64170 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.379-0400 m30999| 2015-07-09T14:17:42.378-0400 I NETWORK [conn516] end connection 127.0.0.1:64169 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.404-0400 m30999| 2015-07-09T14:17:42.404-0400 I NETWORK [conn515] end connection 127.0.0.1:64168 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:17:42.415-0400 m30998| 2015-07-09T14:17:42.415-0400 I NETWORK [conn518] end connection 127.0.0.1:64177 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:04.599-0400 m30999| 2015-07-09T14:18:04.599-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:04.596-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:06.116-0400 m30998| 2015-07-09T14:18:06.116-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:06.113-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:06.911-0400 m31100| 2015-07-09T14:18:06.910-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:06.908-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:07.326-0400 m31200| 2015-07-09T14:18:07.325-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:07.323-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:34.602-0400 m30999| 2015-07-09T14:18:34.601-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:34.599-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:36.120-0400 m30998| 2015-07-09T14:18:36.119-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:36.116-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:36.915-0400 m31100| 2015-07-09T14:18:36.914-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:36.911-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:18:37.328-0400 m31200| 2015-07-09T14:18:37.328-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:18:37.325-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.455-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400 jstests/concurrency/fsm_workloads/indexed_insert_ttl.js: Workload completed in 2406 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400 m30999| 2015-07-09T14:19:02.456-0400 I COMMAND [conn1] DROP: db81.coll81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.456-0400 m30999| 2015-07-09T14:19:02.456-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:02.456-0400-559ebb16ca4787b9985d1f54", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465942456), what: "dropCollection.start", ns: "db81.coll81", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.512-0400 m30999| 2015-07-09T14:19:02.511-0400 I SHARDING [conn1] distributed lock 'db81.coll81/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebb16ca4787b9985d1f55
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.513-0400 m31100| 2015-07-09T14:19:02.512-0400 I COMMAND [conn38] CMD: drop db81.coll81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.517-0400 m31200| 2015-07-09T14:19:02.517-0400 I COMMAND [conn18] CMD: drop db81.coll81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.517-0400 m31102| 2015-07-09T14:19:02.517-0400 I COMMAND [repl writer worker 7] CMD: drop db81.coll81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.519-0400 m31101| 2015-07-09T14:19:02.519-0400 I COMMAND [repl writer worker 8] CMD: drop db81.coll81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.521-0400 m31202| 2015-07-09T14:19:02.520-0400 I COMMAND [repl writer worker 10] CMD: drop db81.coll81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.521-0400 m31201| 2015-07-09T14:19:02.521-0400 I COMMAND [repl writer worker 1] CMD: drop db81.coll81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.576-0400 m31100| 2015-07-09T14:19:02.575-0400 I SHARDING [conn38] remotely refreshing metadata for db81.coll81 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebac3ca4787b9985d1f52, current metadata version is 2|3||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.577-0400 m31100| 2015-07-09T14:19:02.577-0400 W SHARDING [conn38] no chunks found when reloading db81.coll81, previous version was 0|0||559ebac3ca4787b9985d1f52, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.578-0400 m31100| 2015-07-09T14:19:02.577-0400 I SHARDING [conn38] dropping metadata for db81.coll81 at shard version 2|3||559ebac3ca4787b9985d1f52, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.579-0400 m31200| 2015-07-09T14:19:02.578-0400 I SHARDING [conn18] remotely refreshing metadata for db81.coll81 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebac3ca4787b9985d1f52, current metadata version is 2|5||559ebac3ca4787b9985d1f52
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.581-0400 m31200| 2015-07-09T14:19:02.580-0400 W SHARDING [conn18] no chunks found when reloading db81.coll81, previous version was 0|0||559ebac3ca4787b9985d1f52, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.581-0400 m31200| 2015-07-09T14:19:02.580-0400 I SHARDING [conn18] dropping metadata for db81.coll81 at shard version 2|5||559ebac3ca4787b9985d1f52, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.582-0400 m30999| 2015-07-09T14:19:02.581-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:02.581-0400-559ebb16ca4787b9985d1f56", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465942581), what: "dropCollection", ns: "db81.coll81", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.636-0400 m30999| 2015-07-09T14:19:02.635-0400 I SHARDING [conn1] distributed lock 'db81.coll81/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.692-0400 m30999| 2015-07-09T14:19:02.691-0400 I COMMAND [conn1] DROP DATABASE: db81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.692-0400 m30999| 2015-07-09T14:19:02.691-0400 I SHARDING [conn1] DBConfig::dropDatabase: db81
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.692-0400 m30999| 2015-07-09T14:19:02.692-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:02.692-0400-559ebb16ca4787b9985d1f57", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465942692), what: "dropDatabase.start", ns: "db81", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.799-0400 m30999| 2015-07-09T14:19:02.798-0400 I SHARDING [conn1] DBConfig::dropDatabase: db81 dropped sharded collections: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.799-0400 m31100| 2015-07-09T14:19:02.799-0400 I COMMAND [conn160] dropDatabase db81 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.799-0400 m31100| 2015-07-09T14:19:02.799-0400 I COMMAND [conn160] dropDatabase db81 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.800-0400 m30999| 2015-07-09T14:19:02.799-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:02.799-0400-559ebb16ca4787b9985d1f58", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465942799), what: "dropDatabase", ns: "db81", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.800-0400 m31102| 2015-07-09T14:19:02.800-0400 I COMMAND [repl writer worker 0] dropDatabase db81 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.801-0400 m31102| 2015-07-09T14:19:02.800-0400 I COMMAND [repl writer worker 0] dropDatabase db81 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.801-0400 m31101| 2015-07-09T14:19:02.800-0400 I COMMAND [repl writer worker 9] dropDatabase db81 starting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.801-0400 m31101| 2015-07-09T14:19:02.800-0400 I COMMAND [repl writer worker 9] dropDatabase db81 finished
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.894-0400 m31100| 2015-07-09T14:19:02.894-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.898-0400 m31102| 2015-07-09T14:19:02.897-0400 I COMMAND [repl writer worker 6] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.898-0400 m31101| 2015-07-09T14:19:02.897-0400 I COMMAND [repl writer worker 15] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.940-0400 m31200| 2015-07-09T14:19:02.940-0400 I COMMAND [conn1] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.942-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.942-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.942-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.942-0400 jstests/concurrency/fsm_workloads/agg_base.js
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.942-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.943-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.943-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.944-0400 m31201| 2015-07-09T14:19:02.943-0400 I COMMAND [repl writer worker 8] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.944-0400 m31202| 2015-07-09T14:19:02.943-0400 I COMMAND [repl writer worker 13] CMD: drop test.fsm_teardown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.950-0400 m30999| 2015-07-09T14:19:02.949-0400 I SHARDING [conn1] distributed lock 'db82/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebb16ca4787b9985d1f59
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.955-0400 m30999| 2015-07-09T14:19:02.954-0400 I SHARDING [conn1] Placing [db82] on: test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:02.955-0400 m30999| 2015-07-09T14:19:02.955-0400 I SHARDING [conn1] Enabling sharding for database [db82] in config db
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.011-0400 m30999| 2015-07-09T14:19:03.010-0400 I SHARDING [conn1] distributed lock 'db82/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.044-0400 m31100| 2015-07-09T14:19:03.043-0400 I INDEX [conn145] build index on: db82.coll82 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db82.coll82" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.045-0400 m31100| 2015-07-09T14:19:03.044-0400 I INDEX [conn145] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.050-0400 m31100| 2015-07-09T14:19:03.050-0400 I INDEX [conn145] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.051-0400 m30999| 2015-07-09T14:19:03.051-0400 I COMMAND [conn1] CMD: shardcollection: { shardcollection: "db82.coll82", key: { _id: "hashed" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.054-0400 m30999| 2015-07-09T14:19:03.054-0400 I SHARDING [conn1] distributed lock 'db82.coll82/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebb17ca4787b9985d1f5a
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.055-0400 m30999| 2015-07-09T14:19:03.055-0400 I SHARDING [conn1] enable sharding on: db82.coll82 with shard key: { _id: "hashed" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.056-0400 m30999| 2015-07-09T14:19:03.055-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.055-0400-559ebb17ca4787b9985d1f5b", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465943055), what: "shardCollection.start", ns: "db82.coll82", details: { shardKey: { _id: "hashed" }, collection: "db82.coll82", primary: "test-rs0:test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", initShards: [], numChunks: 2 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.074-0400 m31101| 2015-07-09T14:19:03.073-0400 I INDEX [repl writer worker 4] build index on: db82.coll82 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db82.coll82" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.074-0400 m31101| 2015-07-09T14:19:03.073-0400 I INDEX [repl writer worker 4] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.075-0400 m31102| 2015-07-09T14:19:03.074-0400 I INDEX [repl writer worker 5] build index on: db82.coll82 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db82.coll82" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.075-0400 m31102| 2015-07-09T14:19:03.075-0400 I INDEX [repl writer worker 5] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.079-0400 m31102| 2015-07-09T14:19:03.078-0400 I INDEX [repl writer worker 5] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.080-0400 m31101| 2015-07-09T14:19:03.079-0400 I INDEX [repl writer worker 4] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.109-0400 m30999| 2015-07-09T14:19:03.108-0400 I SHARDING [conn1] going to create 2 chunk(s) for: db82.coll82 using new epoch 559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.217-0400 m30999| 2015-07-09T14:19:03.217-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db82.coll82: 1ms sequenceNumber: 356 version: 1|1||559ebb17ca4787b9985d1f5c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.273-0400 m30999| 2015-07-09T14:19:03.273-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db82.coll82: 1ms sequenceNumber: 357 version: 1|1||559ebb17ca4787b9985d1f5c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.275-0400 m31100| 2015-07-09T14:19:03.275-0400 I SHARDING [conn175] remotely refreshing metadata for db82.coll82 with requested shard version 1|1||559ebb17ca4787b9985d1f5c, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.277-0400 m31100| 2015-07-09T14:19:03.276-0400 I SHARDING [conn175] collection db82.coll82 was previously unsharded, new metadata loaded with shard version 1|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.277-0400 m31100| 2015-07-09T14:19:03.277-0400 I SHARDING [conn175] collection version was loaded at version 1|1||559ebb17ca4787b9985d1f5c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.278-0400 m30999| 2015-07-09T14:19:03.277-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.277-0400-559ebb17ca4787b9985d1f5d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465943277), what: "shardCollection", ns: "db82.coll82", details: { version: "1|1||559ebb17ca4787b9985d1f5c" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.332-0400 m30999| 2015-07-09T14:19:03.332-0400 I SHARDING [conn1] distributed lock 'db82.coll82/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.333-0400 m30999| 2015-07-09T14:19:03.333-0400 I SHARDING [conn1] moving chunk ns: db82.coll82 moving ( ns: db82.coll82, shard: test-rs0, lastmod: 1|1||000000000000000000000000, min: { _id: 0 }, max: { _id: MaxKey }) test-rs0 -> test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.334-0400 m31100| 2015-07-09T14:19:03.333-0400 I SHARDING [conn38] moveChunk waiting for full cleanup after move
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.335-0400 m31100| 2015-07-09T14:19:03.334-0400 I SHARDING [conn38] received moveChunk request: { moveChunk: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebb17ca4787b9985d1f5c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.340-0400 m31100| 2015-07-09T14:19:03.340-0400 I SHARDING [conn38] distributed lock 'db82.coll82/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebb17792e00bb67274b27
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.341-0400 m31100| 2015-07-09T14:19:03.340-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.340-0400-559ebb17792e00bb67274b28", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465943340), what: "moveChunk.start", ns: "db82.coll82", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.394-0400 m31100| 2015-07-09T14:19:03.393-0400 I SHARDING [conn38] remotely refreshing metadata for db82.coll82 based on current shard version 1|1||559ebb17ca4787b9985d1f5c, current metadata version is 1|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.395-0400 m31100| 2015-07-09T14:19:03.394-0400 I SHARDING [conn38] metadata of collection db82.coll82 already up to date (shard version : 1|1||559ebb17ca4787b9985d1f5c, took 1ms)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.395-0400 m31100| 2015-07-09T14:19:03.395-0400 I SHARDING [conn38] moveChunk request accepted at version 1|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.396-0400 m31100| 2015-07-09T14:19:03.395-0400 I SHARDING [conn38] moveChunk number of documents: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.396-0400 m31200| 2015-07-09T14:19:03.396-0400 I SHARDING [conn16] remotely refreshing metadata for db82.coll82, current shard version is 0|0||000000000000000000000000, current metadata version is 0|0||000000000000000000000000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.398-0400 m31200| 2015-07-09T14:19:03.398-0400 I SHARDING [conn16] collection db82.coll82 was previously unsharded, new metadata loaded with shard version 0|0||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.398-0400 m31200| 2015-07-09T14:19:03.398-0400 I SHARDING [conn16] collection version was loaded at version 1|1||559ebb17ca4787b9985d1f5c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.399-0400 m31200| 2015-07-09T14:19:03.398-0400 I SHARDING [migrateThread] starting receiving-end of migration of chunk { _id: 0 } -> { _id: MaxKey } for collection db82.coll82 from test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102 at epoch 559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.401-0400 m31100| 2015-07-09T14:19:03.400-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.405-0400 m31100| 2015-07-09T14:19:03.404-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.409-0400 m31100| 2015-07-09T14:19:03.409-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.420-0400 m31100| 2015-07-09T14:19:03.419-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.425-0400 m31200| 2015-07-09T14:19:03.425-0400 I INDEX [migrateThread] build index on: db82.coll82 properties: { v: 1, key: { _id: 1 }, name: "_id_", ns: "db82.coll82" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.425-0400 m31200| 2015-07-09T14:19:03.425-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.437-0400 m31200| 2015-07-09T14:19:03.436-0400 I INDEX [migrateThread] build index on: db82.coll82 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db82.coll82" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.437-0400 m31200| 2015-07-09T14:19:03.436-0400 I INDEX [migrateThread] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.437-0400 m31100| 2015-07-09T14:19:03.436-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "ready", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.447-0400 m31200| 2015-07-09T14:19:03.447-0400 I INDEX [migrateThread] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.448-0400 m31200| 2015-07-09T14:19:03.448-0400 I SHARDING [migrateThread] Deleter starting delete for: db82.coll82 from { _id: 0 } -> { _id: MaxKey }, with opId: 109522
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.449-0400 m31200| 2015-07-09T14:19:03.448-0400 I SHARDING [migrateThread] rangeDeleter deleted 0 documents for db82.coll82 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.460-0400 m31202| 2015-07-09T14:19:03.459-0400 I INDEX [repl writer worker 8] build index on: db82.coll82 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db82.coll82" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.460-0400 m31202| 2015-07-09T14:19:03.459-0400 I INDEX [repl writer worker 8] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.465-0400 m31201| 2015-07-09T14:19:03.464-0400 I INDEX [repl writer worker 10] build index on: db82.coll82 properties: { v: 1, key: { _id: "hashed" }, name: "_id_hashed", ns: "db82.coll82" }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.465-0400 m31201| 2015-07-09T14:19:03.464-0400 I INDEX [repl writer worker 10] building index using bulk method
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.466-0400 m31202| 2015-07-09T14:19:03.464-0400 I INDEX [repl writer worker 8] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.467-0400 m31200| 2015-07-09T14:19:03.466-0400 I SHARDING [migrateThread] Waiting for replication to catch up before entering critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.467-0400 m31200| 2015-07-09T14:19:03.467-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db82.coll82' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.469-0400 m31201| 2015-07-09T14:19:03.469-0400 I INDEX [repl writer worker 10] build index done. scanned 0 total records. 0 secs
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.470-0400 m31100| 2015-07-09T14:19:03.469-0400 I SHARDING [conn38] moveChunk data transfer progress: { active: true, ns: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "steady", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 } my mem used: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.470-0400 m31100| 2015-07-09T14:19:03.469-0400 I SHARDING [conn38] About to check if it is safe to enter critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.470-0400 m31100| 2015-07-09T14:19:03.470-0400 I SHARDING [conn38] About to enter migrate critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.470-0400 m31100| 2015-07-09T14:19:03.470-0400 I SHARDING [conn38] moveChunk setting version to: 2|0||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.479-0400 m31200| 2015-07-09T14:19:03.479-0400 I SHARDING [migrateThread] migrate commit succeeded flushing to secondaries for 'db82.coll82' { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.479-0400 m31200| 2015-07-09T14:19:03.479-0400 I SHARDING [migrateThread] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.479-0400-559ebb17d5a107a5b9c0db8b", server: "bs-osx108-8", clientAddr: "", time: new Date(1436465943479), what: "moveChunk.to", ns: "db82.coll82", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 5: 49, step 2 of 5: 17, step 3 of 5: 0, step 4 of 5: 0, step 5 of 5: 12, note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.533-0400 m31100| 2015-07-09T14:19:03.532-0400 I SHARDING [conn38] moveChunk migrate commit accepted by TO-shard: { active: false, ns: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", min: { _id: 0 }, max: { _id: MaxKey }, shardKeyPattern: { _id: "hashed" }, state: "done", counts: { cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 }, ok: 1.0 }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.533-0400 m31100| 2015-07-09T14:19:03.532-0400 I SHARDING [conn38] moveChunk updating self version to: 2|1||559ebb17ca4787b9985d1f5c through { _id: MinKey } -> { _id: 0 } for collection 'db82.coll82'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.534-0400 m31100| 2015-07-09T14:19:03.534-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.534-0400-559ebb17792e00bb67274b29", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465943534), what: "moveChunk.commit", ns: "db82.coll82", details: { min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs0", to: "test-rs1", cloned: 0, clonedBytes: 0, catchup: 0, steady: 0 } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.587-0400 m31100| 2015-07-09T14:19:03.587-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.588-0400 m31100| 2015-07-09T14:19:03.587-0400 I SHARDING [conn38] doing delete inline for cleanup of chunk data
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.588-0400 m31100| 2015-07-09T14:19:03.587-0400 I SHARDING [conn38] Deleter starting delete for: db82.coll82 from { _id: 0 } -> { _id: MaxKey }, with opId: 249358
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.588-0400 m31100| 2015-07-09T14:19:03.587-0400 I SHARDING [conn38] rangeDeleter deleted 0 documents for db82.coll82 from { _id: 0 } -> { _id: MaxKey }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.588-0400 m31100| 2015-07-09T14:19:03.587-0400 I SHARDING [conn38] MigrateFromStatus::done About to acquire global lock to exit critical section
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.589-0400 m31100| 2015-07-09T14:19:03.588-0400 I SHARDING [conn38] distributed lock 'db82.coll82/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.589-0400 m31100| 2015-07-09T14:19:03.589-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.589-0400-559ebb17792e00bb67274b2a", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465943589), what: "moveChunk.from", ns: "db82.coll82", details: { min: { _id: 0 }, max: { _id: MaxKey }, step 1 of 6: 0, step 2 of 6: 60, step 3 of 6: 3, step 4 of 6: 70, step 5 of 6: 117, step 6 of 6: 0, to: "test-rs1", from: "test-rs0", note: "success" } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.644-0400 m31100| 2015-07-09T14:19:03.643-0400 I COMMAND [conn38] command db82.coll82 command: moveChunk { moveChunk: "db82.coll82", from: "test-rs0/bs-osx108-8:31100,bs-osx108-8:31101,bs-osx108-8:31102", to: "test-rs1/bs-osx108-8:31200,bs-osx108-8:31201,bs-osx108-8:31202", fromShard: "test-rs0", toShard: "test-rs1", min: { _id: 0 }, max: { _id: MaxKey }, maxChunkSizeBytes: 52428800, configdb: "test-configRS/bs-osx108-8:29000", secondaryThrottle: false, waitForDelete: true, maxTimeMS: 0, epoch: ObjectId('559ebb17ca4787b9985d1f5c') } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:82 locks:{ Global: { acquireCount: { r: 12, w: 3, R: 3 } }, Database: { acquireCount: { r: 3, w: 3 } }, Collection: { acquireCount: { r: 3, w: 1, W: 2 } } } protocol:op_command 309ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.646-0400 m30999| 2015-07-09T14:19:03.646-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db82.coll82: 0ms sequenceNumber: 358 version: 2|1||559ebb17ca4787b9985d1f5c based on: 1|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.647-0400 m31100| 2015-07-09T14:19:03.647-0400 I SHARDING [conn38] received splitChunk request: { splitChunk: "db82.coll82", keyPattern: { _id: "hashed" }, min: { _id: MinKey }, max: { _id: 0 }, from: "test-rs0", splitKeys: [ { _id: -4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebb17ca4787b9985d1f5c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.651-0400 m31100| 2015-07-09T14:19:03.650-0400 I SHARDING [conn38] distributed lock 'db82.coll82/bs-osx108-8:31100:1436464536:197041335' acquired, ts : 559ebb17792e00bb67274b2b
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.651-0400 m31100| 2015-07-09T14:19:03.650-0400 I SHARDING [conn38] remotely refreshing metadata for db82.coll82 based on current shard version 2|0||559ebb17ca4787b9985d1f5c, current metadata version is 2|0||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.653-0400 m31100| 2015-07-09T14:19:03.652-0400 I SHARDING [conn38] updating metadata for db82.coll82 from shard version 2|0||559ebb17ca4787b9985d1f5c to shard version 2|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.653-0400 m31100| 2015-07-09T14:19:03.652-0400 I SHARDING [conn38] collection version was loaded at version 2|1||559ebb17ca4787b9985d1f5c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.653-0400 m31100| 2015-07-09T14:19:03.652-0400 I SHARDING [conn38] splitChunk accepted at version 2|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.654-0400 m31100| 2015-07-09T14:19:03.654-0400 I SHARDING [conn38] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.654-0400-559ebb17792e00bb67274b2c", server: "bs-osx108-8", clientAddr: "127.0.0.1:62640", time: new Date(1436465943654), what: "split", ns: "db82.coll82", details: { before: { min: { _id: MinKey }, max: { _id: 0 } }, left: { min: { _id: MinKey }, max: { _id: -4611686018427387902 }, lastmod: Timestamp 2000|2, lastmodEpoch: ObjectId('559ebb17ca4787b9985d1f5c') }, right: { min: { _id: -4611686018427387902 }, max: { _id: 0 }, lastmod: Timestamp 2000|3, lastmodEpoch: ObjectId('559ebb17ca4787b9985d1f5c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.710-0400 m31100| 2015-07-09T14:19:03.709-0400 I SHARDING [conn38] distributed lock 'db82.coll82/bs-osx108-8:31100:1436464536:197041335' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.712-0400 m30999| 2015-07-09T14:19:03.712-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db82.coll82: 1ms sequenceNumber: 359 version: 2|3||559ebb17ca4787b9985d1f5c based on: 2|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.713-0400 m31200| 2015-07-09T14:19:03.712-0400 I SHARDING [conn18] received splitChunk request: { splitChunk: "db82.coll82", keyPattern: { _id: "hashed" }, min: { _id: 0 }, max: { _id: MaxKey }, from: "test-rs1", splitKeys: [ { _id: 4611686018427387902 } ], configdb: "test-configRS/bs-osx108-8:29000", epoch: ObjectId('559ebb17ca4787b9985d1f5c') }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.719-0400 m31200| 2015-07-09T14:19:03.718-0400 I SHARDING [conn18] distributed lock 'db82.coll82/bs-osx108-8:31200:1436464537:809424560' acquired, ts : 559ebb17d5a107a5b9c0db8c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.719-0400 m31200| 2015-07-09T14:19:03.718-0400 I SHARDING [conn18] remotely refreshing metadata for db82.coll82 based on current shard version 0|0||559ebb17ca4787b9985d1f5c, current metadata version is 1|1||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.720-0400 m31200| 2015-07-09T14:19:03.720-0400 I SHARDING [conn18] updating metadata for db82.coll82 from shard version 0|0||559ebb17ca4787b9985d1f5c to shard version 2|0||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.720-0400 m31200| 2015-07-09T14:19:03.720-0400 I SHARDING [conn18] collection version was loaded at version 2|3||559ebb17ca4787b9985d1f5c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.720-0400 m31200| 2015-07-09T14:19:03.720-0400 I SHARDING [conn18] splitChunk accepted at version 2|0||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.722-0400 m31200| 2015-07-09T14:19:03.721-0400 I SHARDING [conn18] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:03.721-0400-559ebb17d5a107a5b9c0db8d", server: "bs-osx108-8", clientAddr: "127.0.0.1:62585", time: new Date(1436465943721), what: "split", ns: "db82.coll82", details: { before: { min: { _id: 0 }, max: { _id: MaxKey } }, left: { min: { _id: 0 }, max: { _id: 4611686018427387902 }, lastmod: Timestamp 2000|4, lastmodEpoch: ObjectId('559ebb17ca4787b9985d1f5c') }, right: { min: { _id: 4611686018427387902 }, max: { _id: MaxKey }, lastmod: Timestamp 2000|5, lastmodEpoch: ObjectId('559ebb17ca4787b9985d1f5c') } } }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.777-0400 m31200| 2015-07-09T14:19:03.776-0400 I SHARDING [conn18] distributed lock 'db82.coll82/bs-osx108-8:31200:1436464537:809424560' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:03.779-0400 m30999| 2015-07-09T14:19:03.778-0400 I SHARDING [conn1] ChunkManager: time to load chunks for db82.coll82: 0ms sequenceNumber: 360 version: 2|5||559ebb17ca4787b9985d1f5c based on: 2|3||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.023-0400 m31102| 2015-07-09T14:19:04.022-0400 I REPL [ReplicationExecutor] changing sync target because current sync target's most recent OpTime is (term: 0, timestamp: Jul 9 14:18:16:7a3) which is more than 30 seconds behind member bs-osx108-8:31101 whose most recent OpTime is (term: 0, timestamp: Jul 9 14:19:04:6)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.023-0400 m31100| 2015-07-09T14:19:04.023-0400 I NETWORK [conn10] end connection 127.0.0.1:62524 (118 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.024-0400 m31102| 2015-07-09T14:19:04.023-0400 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.030-0400 m31100| 2015-07-09T14:19:04.030-0400 I NETWORK [conn11] end connection 127.0.0.1:62527 (117 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.243-0400 m31202| 2015-07-09T14:19:04.242-0400 I REPL [ReplicationExecutor] changing sync target because current sync target's most recent OpTime is (term: 0, timestamp: Jul 9 14:18:24:7fd) which is more than 30 seconds behind member bs-osx108-8:31201 whose most recent OpTime is (term: 0, timestamp: Jul 9 14:19:04:93)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.243-0400 m31202| 2015-07-09T14:19:04.243-0400 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.243-0400 m31200| 2015-07-09T14:19:04.243-0400 I NETWORK [conn9] end connection 127.0.0.1:62526 (99 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.250-0400 m31200| 2015-07-09T14:19:04.250-0400 I NETWORK [conn10] end connection 127.0.0.1:62529 (98 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.443-0400 m31100| 2015-07-09T14:19:04.443-0400 I COMMAND [conn145] command db82.$cmd command: insert { insert: "coll82", documents: 477, ordered: false, metadata: { shardName: "test-rs0", shardVersion: [ Timestamp 2000|3, ObjectId('559ebb17ca4787b9985d1f5c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 498, w: 498 } }, Database: { acquireCount: { w: 498 } }, Collection: { acquireCount: { w: 21 } }, Metadata: { acquireCount: { w: 477 } }, oplog: { acquireCount: { w: 477 } } } protocol:op_command 450ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.516-0400 m31200| 2015-07-09T14:19:04.515-0400 I COMMAND [conn23] command db82.$cmd command: insert { insert: "coll82", documents: 523, ordered: false, metadata: { shardName: "test-rs1", shardVersion: [ Timestamp 2000|5, ObjectId('559ebb17ca4787b9985d1f5c') ], session: 0 } } ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:125 locks:{ Global: { acquireCount: { r: 545, w: 545 } }, Database: { acquireCount: { w: 545 } }, Collection: { acquireCount: { w: 22 } }, Metadata: { acquireCount: { w: 523 } }, oplog: { acquireCount: { w: 523 } } } protocol:op_command 496ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.605-0400 m30999| 2015-07-09T14:19:04.605-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:19:04.601-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.694-0400 Using 5 threads (requested 5)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.748-0400 m30999| 2015-07-09T14:19:04.746-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64185 #520 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.753-0400 m30998| 2015-07-09T14:19:04.753-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64186 #519 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.753-0400 m30999| 2015-07-09T14:19:04.753-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64187 #521 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.756-0400 m30998| 2015-07-09T14:19:04.754-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64188 #520 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.757-0400 m30998| 2015-07-09T14:19:04.756-0400 I NETWORK [mongosMain] connection accepted from 127.0.0.1:64189 #521 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.762-0400 setting random seed: 6762136360630
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.762-0400 setting random seed: 155694778077
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.762-0400 setting random seed: 1225866037420
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.762-0400 setting random seed: 3130500526167
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.762-0400 setting random seed: 4322367794811
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.764-0400 m31102| 2015-07-09T14:19:04.764-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64190 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.764-0400 m30998| 2015-07-09T14:19:04.764-0400 I SHARDING [conn520] ChunkManager: time to load chunks for db82.coll82: 0ms sequenceNumber: 100 version: 2|5||559ebb17ca4787b9985d1f5c based on: (empty)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.765-0400 m31101| 2015-07-09T14:19:04.765-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64191 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.765-0400 m31202| 2015-07-09T14:19:04.765-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64192 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.766-0400 m31101| 2015-07-09T14:19:04.766-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64193 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.766-0400 m31102| 2015-07-09T14:19:04.766-0400 I SHARDING [conn11] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.767-0400 m31102| 2015-07-09T14:19:04.767-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64194 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.768-0400 m31101| 2015-07-09T14:19:04.768-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64195 #13 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.769-0400 m31201| 2015-07-09T14:19:04.768-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64197 #11 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.769-0400 m31202| 2015-07-09T14:19:04.768-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64198 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.769-0400 m31101| 2015-07-09T14:19:04.768-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64196 #14 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.770-0400 m31101| 2015-07-09T14:19:04.769-0400 I SHARDING [conn11] first cluster operation detected, adding sharding hook to enable versioning and authentication to remote servers
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.770-0400 m31201| 2015-07-09T14:19:04.768-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64199 #12 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.771-0400 m31101| 2015-07-09T14:19:04.770-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64200 #15 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.772-0400 m31202| 2015-07-09T14:19:04.772-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64201 #13 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.773-0400 m31101| 2015-07-09T14:19:04.772-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64202 #16 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.774-0400 m31202| 2015-07-09T14:19:04.772-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64203 #14 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.775-0400 m31201| 2015-07-09T14:19:04.773-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64204 #13 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.782-0400 m31101| 2015-07-09T14:19:04.781-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64206 #17 (15 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.786-0400 m31201| 2015-07-09T14:19:04.781-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64205 #14 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.788-0400 m31101| 2015-07-09T14:19:04.787-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64207 #18 (16 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.795-0400 m31202| 2015-07-09T14:19:04.795-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64208 #15 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.797-0400 m31202| 2015-07-09T14:19:04.796-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64209 #16 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.840-0400 m30999| 2015-07-09T14:19:04.839-0400 I NETWORK [conn521] end connection 127.0.0.1:64187 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.901-0400 m31101| 2015-07-09T14:19:04.900-0400 I COMMAND [conn14] command db82.coll82 command: aggregate { aggregate: "coll82", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31101", id: 6066594954036 }, { host: "bs-osx108-8:31202", id: 3854035309532 } ] } ], cursor: {} } cursorid:6064543404170 ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1214196 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_command 119ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:04.925-0400 m31101| 2015-07-09T14:19:04.923-0400 I COMMAND [conn13] command db82.coll82 command: aggregate { aggregate: "coll82", pipeline: [ { $mergeCursors: [ { host: "bs-osx108-8:31101", id: 6066526169763 }, { host: "bs-osx108-8:31202", id: 3853242500406 } ] } ], cursor: {} } cursorid:6065518542664 ntoreturn:1 ntoskip:0 keyUpdates:0 writeConflicts:0 numYields:0 reslen:1214196 locks:{ Global: { acquireCount: { r: 4 } }, Database: { acquireCount: { r: 2 } }, Collection: { acquireCount: { r: 2 } } } protocol:op_command 139ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.025-0400 m31102| 2015-07-09T14:19:05.024-0400 I REPL [ReplicationExecutor] syncing from: bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.029-0400 m31100| 2015-07-09T14:19:05.027-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64210 #199 (118 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.034-0400 m30998| 2015-07-09T14:19:05.033-0400 I NETWORK [conn519] end connection 127.0.0.1:64186 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.098-0400 m31102| 2015-07-09T14:19:05.097-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64211 #13 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.102-0400 m30998| 2015-07-09T14:19:05.102-0400 I NETWORK [conn521] end connection 127.0.0.1:64189 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.215-0400 m31102| 2015-07-09T14:19:05.215-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64212 #14 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.219-0400 m31201| 2015-07-09T14:19:05.219-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64213 #15 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.244-0400 m31202| 2015-07-09T14:19:05.244-0400 I REPL [ReplicationExecutor] syncing from: bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.245-0400 m31200| 2015-07-09T14:19:05.245-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64214 #158 (99 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.252-0400 m31202| 2015-07-09T14:19:05.251-0400 I REPL [SyncSourceFeedback] setting syncSourceFeedback to bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.254-0400 m31200| 2015-07-09T14:19:05.253-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64215 #159 (100 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.298-0400 m30998| 2015-07-09T14:19:05.297-0400 I NETWORK [conn520] end connection 127.0.0.1:64188 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.534-0400 m31102| 2015-07-09T14:19:05.534-0400 I REPL [SyncSourceFeedback] setting syncSourceFeedback to bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:05.535-0400 m31100| 2015-07-09T14:19:05.535-0400 I NETWORK [initandlisten] connection accepted from 127.0.0.1:64216 #200 (119 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.122-0400 m30998| 2015-07-09T14:19:06.122-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:19:06.119-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.209-0400 m30999| 2015-07-09T14:19:06.208-0400 I NETWORK [conn520] end connection 127.0.0.1:64185 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.227-0400 m30999| 2015-07-09T14:19:06.226-0400 I COMMAND [conn1] DROP: db82.coll82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.227-0400 m30999| 2015-07-09T14:19:06.226-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:06.226-0400-559ebb1aca4787b9985d1f5e", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465946226), what: "dropCollection.start", ns: "db82.coll82", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.285-0400 m30999| 2015-07-09T14:19:06.285-0400 I SHARDING [conn1] distributed lock 'db82.coll82/bs-osx108-8:30999:1436464534:16807' acquired, ts : 559ebb1aca4787b9985d1f5f
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.287-0400 m31100| 2015-07-09T14:19:06.286-0400 I COMMAND [conn38] CMD: drop db82.coll82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.294-0400 m31101| 2015-07-09T14:19:06.294-0400 I COMMAND [repl writer worker 4] CMD: drop db82.coll82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.294-0400 m31102| 2015-07-09T14:19:06.294-0400 I COMMAND [repl writer worker 1] CMD: drop db82.coll82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.298-0400 m31200| 2015-07-09T14:19:06.298-0400 I COMMAND [conn18] CMD: drop db82.coll82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.304-0400 m31201| 2015-07-09T14:19:06.304-0400 I COMMAND [repl writer worker 15] CMD: drop db82.coll82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.309-0400 m31202| 2015-07-09T14:19:06.308-0400 I COMMAND [repl writer worker 11] CMD: drop db82.coll82
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.357-0400 m31100| 2015-07-09T14:19:06.357-0400 I SHARDING [conn38] remotely refreshing metadata for db82.coll82 with requested shard version 0|0||000000000000000000000000, current shard version is 2|3||559ebb17ca4787b9985d1f5c, current metadata version is 2|3||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.359-0400 m31100| 2015-07-09T14:19:06.358-0400 W SHARDING [conn38] no chunks found when reloading db82.coll82, previous version was 0|0||559ebb17ca4787b9985d1f5c, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.359-0400 m31100| 2015-07-09T14:19:06.359-0400 I SHARDING [conn38] dropping metadata for db82.coll82 at shard version 2|3||559ebb17ca4787b9985d1f5c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.360-0400 m31200| 2015-07-09T14:19:06.360-0400 I SHARDING [conn18] remotely refreshing metadata for db82.coll82 with requested shard version 0|0||000000000000000000000000, current shard version is 2|5||559ebb17ca4787b9985d1f5c, current metadata version is 2|5||559ebb17ca4787b9985d1f5c
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.362-0400 m31200| 2015-07-09T14:19:06.362-0400 W SHARDING [conn18] no chunks found when reloading db82.coll82, previous version was 0|0||559ebb17ca4787b9985d1f5c, this is a drop
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.362-0400 m31200| 2015-07-09T14:19:06.362-0400 I SHARDING [conn18] dropping metadata for db82.coll82 at shard version 2|5||559ebb17ca4787b9985d1f5c, took 1ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.363-0400 m30999| 2015-07-09T14:19:06.363-0400 I SHARDING [conn1] about to log metadata event: { _id: "bs-osx108-8-2015-07-09T14:19:06.363-0400-559ebb1aca4787b9985d1f60", server: "bs-osx108-8", clientAddr: "127.0.0.1:62545", time: new Date(1436465946363), what: "dropCollection", ns: "db82.coll82", details: {} }
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.416-0400 m30999| 2015-07-09T14:19:06.416-0400 I SHARDING [conn1] distributed lock 'db82.coll82/bs-osx108-8:30999:1436464534:16807' unlocked.
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.470-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.470-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.470-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.470-0400 jstests/concurrency/fsm_workloads/agg_base.js: Workload completed in 1533 ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.470-0400 ----
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.471-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.471-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.475-0400 m30999| 2015-07-09T14:19:06.475-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.475-0400 m30999| 2015-07-09T14:19:06.475-0400 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30999:1436464534:16807'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.476-0400 m30999| 2015-07-09T14:19:06.475-0400 I NETWORK [LockPinger] scoped connection to test-configRS/bs-osx108-8:29000 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.476-0400 m29000| 2015-07-09T14:19:06.476-0400 I NETWORK [conn36] end connection 127.0.0.1:62712 (72 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.476-0400 m30999| 2015-07-09T14:19:06.476-0400 I SHARDING [signalProcessingThread] dbexit: rc:0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.486-0400 m31100| 2015-07-09T14:19:06.485-0400 I NETWORK [conn188] end connection 127.0.0.1:63804 (118 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.487-0400 m31100| 2015-07-09T14:19:06.485-0400 I NETWORK [conn184] end connection 127.0.0.1:63722 (118 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.487-0400 m31100| 2015-07-09T14:19:06.485-0400 I NETWORK [conn183] end connection 127.0.0.1:63721 (116 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.487-0400 m29000| 2015-07-09T14:19:06.486-0400 I NETWORK [conn71] end connection 127.0.0.1:63671 (71 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.487-0400 m29000| 2015-07-09T14:19:06.486-0400 I NETWORK [conn61] end connection 127.0.0.1:63646 (70 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.488-0400 m29000| 2015-07-09T14:19:06.486-0400 I NETWORK [conn70] end connection 127.0.0.1:63665 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.488-0400 m29000| 2015-07-09T14:19:06.486-0400 I NETWORK [conn37] end connection 127.0.0.1:62713 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.488-0400 m31200| 2015-07-09T14:19:06.485-0400 I NETWORK [conn143] end connection 127.0.0.1:63339 (99 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.488-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn148] end connection 127.0.0.1:63669 (98 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.488-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn137] end connection 127.0.0.1:63332 (98 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.489-0400 m31102| 2015-07-09T14:19:06.488-0400 I NETWORK [conn11] end connection 127.0.0.1:64190 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.489-0400 m31102| 2015-07-09T14:19:06.488-0400 I NETWORK [conn6] end connection 127.0.0.1:62559 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.489-0400 m31100| 2015-07-09T14:19:06.485-0400 I NETWORK [conn179] end connection 127.0.0.1:63717 (116 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.490-0400 m31201| 2015-07-09T14:19:06.487-0400 I NETWORK [conn11] end connection 127.0.0.1:64197 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.490-0400 m31201| 2015-07-09T14:19:06.487-0400 I NETWORK [conn8] end connection 127.0.0.1:62682 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.490-0400 m29000| 2015-07-09T14:19:06.487-0400 I NETWORK [conn67] end connection 127.0.0.1:63661 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.490-0400 m29000| 2015-07-09T14:19:06.487-0400 I NETWORK [conn38] end connection 127.0.0.1:62714 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.490-0400 m29000| 2015-07-09T14:19:06.487-0400 I NETWORK [conn63] end connection 127.0.0.1:63648 (66 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.491-0400 m29000| 2015-07-09T14:19:06.487-0400 I NETWORK [conn59] end connection 127.0.0.1:63644 (65 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.491-0400 m29000| 2015-07-09T14:19:06.488-0400 I NETWORK [conn17] end connection 127.0.0.1:62558 (63 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.491-0400 m29000| 2015-07-09T14:19:06.488-0400 I NETWORK [conn7] end connection 127.0.0.1:62537 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.491-0400 m29000| 2015-07-09T14:19:06.488-0400 I NETWORK [conn9] end connection 127.0.0.1:62541 (61 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.492-0400 m29000| 2015-07-09T14:19:06.488-0400 I NETWORK [conn40] end connection 127.0.0.1:62756 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.492-0400 m31101| 2015-07-09T14:19:06.488-0400 I NETWORK [conn11] end connection 127.0.0.1:64191 (15 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.492-0400 m31101| 2015-07-09T14:19:06.488-0400 I NETWORK [conn8] end connection 127.0.0.1:62681 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.492-0400 m31100| 2015-07-09T14:19:06.485-0400 I NETWORK [conn165] end connection 127.0.0.1:63667 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.492-0400 m29000| 2015-07-09T14:19:06.488-0400 I NETWORK [conn8] end connection 127.0.0.1:62538 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.493-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn147] end connection 127.0.0.1:63664 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.493-0400 m31100| 2015-07-09T14:19:06.485-0400 I NETWORK [conn162] end connection 127.0.0.1:63658 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.494-0400 m29000| 2015-07-09T14:19:06.488-0400 I NETWORK [conn6] end connection 127.0.0.1:62536 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.494-0400 m31202| 2015-07-09T14:19:06.486-0400 I NETWORK [conn11] end connection 127.0.0.1:64192 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.494-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn71] end connection 127.0.0.1:62903 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.494-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn198] end connection 127.0.0.1:64146 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.494-0400 m29000| 2015-07-09T14:19:06.489-0400 I NETWORK [conn5] end connection 127.0.0.1:62535 (57 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.495-0400 m31202| 2015-07-09T14:19:06.488-0400 I NETWORK [conn6] end connection 127.0.0.1:62562 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.495-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn81] end connection 127.0.0.1:63004 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.495-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn150] end connection 127.0.0.1:63267 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.495-0400 m29000| 2015-07-09T14:19:06.489-0400 I NETWORK [conn4] end connection 127.0.0.1:62534 (56 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.495-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn59] end connection 127.0.0.1:62848 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.496-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn160] end connection 127.0.0.1:63656 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.496-0400 m29000| 2015-07-09T14:19:06.490-0400 I NETWORK [conn3] end connection 127.0.0.1:62533 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.496-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn142] end connection 127.0.0.1:63338 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.496-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn155] end connection 127.0.0.1:63643 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.497-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn51] end connection 127.0.0.1:62801 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.497-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn142] end connection 127.0.0.1:63207 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.497-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn41] end connection 127.0.0.1:62754 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.497-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn145] end connection 127.0.0.1:63252 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.498-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn144] end connection 127.0.0.1:63543 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.498-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn175] end connection 127.0.0.1:63713 (115 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.498-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn84] end connection 127.0.0.1:63007 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.498-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn70] end connection 127.0.0.1:62804 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.499-0400 m31200| 2015-07-09T14:19:06.486-0400 I NETWORK [conn83] end connection 127.0.0.1:63006 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.499-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn156] end connection 127.0.0.1:63652 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.499-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn64] end connection 127.0.0.1:62863 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.499-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn144] end connection 127.0.0.1:63251 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.499-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn82] end connection 127.0.0.1:63005 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.500-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn141] end connection 127.0.0.1:63206 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.500-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn53] end connection 127.0.0.1:62806 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.500-0400 m31100| 2015-07-09T14:19:06.486-0400 I NETWORK [conn154] end connection 127.0.0.1:63642 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.500-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn39] end connection 127.0.0.1:62750 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.500-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn187] end connection 127.0.0.1:63761 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.501-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn35] end connection 127.0.0.1:62742 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.501-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn135] end connection 127.0.0.1:63200 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.501-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn32] end connection 127.0.0.1:62688 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.501-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn68] end connection 127.0.0.1:62802 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.501-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn66] end connection 127.0.0.1:62867 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.502-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn191] end connection 127.0.0.1:63875 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.502-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn63] end connection 127.0.0.1:62862 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.502-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn180] end connection 127.0.0.1:63718 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.502-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn56] end connection 127.0.0.1:62822 (97 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.502-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn86] end connection 127.0.0.1:63076 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.503-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn48] end connection 127.0.0.1:62774 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.503-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn47] end connection 127.0.0.1:62664 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.503-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn38] end connection 127.0.0.1:62640 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.503-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn30] end connection 127.0.0.1:62632 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.503-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn72] end connection 127.0.0.1:62904 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.504-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn157] end connection 127.0.0.1:63653 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.504-0400 m31200| 2015-07-09T14:19:06.487-0400 I NETWORK [conn141] end connection 127.0.0.1:63337 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.504-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn29] end connection 127.0.0.1:62631 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.504-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn14] end connection 127.0.0.1:62564 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.505-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn22] end connection 127.0.0.1:62605 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.505-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn13] end connection 127.0.0.1:62563 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.505-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn44] end connection 127.0.0.1:62651 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.505-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn58] end connection 127.0.0.1:62847 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.506-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn40] end connection 127.0.0.1:62642 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.506-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn91] end connection 127.0.0.1:63077 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.506-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn149] end connection 127.0.0.1:63266 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.506-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn54] end connection 127.0.0.1:62820 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.506-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn37] end connection 127.0.0.1:62639 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.506-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn30] end connection 127.0.0.1:62685 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn143] end connection 127.0.0.1:63209 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn23] end connection 127.0.0.1:62611 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn23] end connection 127.0.0.1:62606 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31200| 2015-07-09T14:19:06.488-0400 I NETWORK [conn22] end connection 127.0.0.1:62610 (96 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31200| 2015-07-09T14:19:06.489-0400 I NETWORK [conn20] end connection 127.0.0.1:62591 (94 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31200| 2015-07-09T14:19:06.489-0400 I NETWORK [conn19] end connection 127.0.0.1:62590 (94 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31200| 2015-07-09T14:19:06.489-0400 I NETWORK [conn18] end connection 127.0.0.1:62585 (94 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.507-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn28] end connection 127.0.0.1:62620 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.487-0400 I NETWORK [conn16] end connection 127.0.0.1:62567 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.488-0400 I NETWORK [conn15] end connection 127.0.0.1:62566 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.488-0400 I NETWORK [conn69] end connection 127.0.0.1:62803 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.488-0400 I NETWORK [conn43] end connection 127.0.0.1:62650 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.488-0400 I NETWORK [conn13] end connection 127.0.0.1:62560 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.488-0400 I NETWORK [conn45] end connection 127.0.0.1:62662 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.488-0400 I NETWORK [conn34] end connection 127.0.0.1:62636 (114 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.508-0400 m31100| 2015-07-09T14:19:06.489-0400 I NETWORK [conn14] end connection 127.0.0.1:62561 (113 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:06.917-0400 m31100| 2015-07-09T14:19:06.917-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:19:06.914-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.331-0400 m31200| 2015-07-09T14:19:07.331-0400 I SHARDING [LockPinger] cluster test-configRS/bs-osx108-8:29000 pinged successfully at 2015-07-09T14:19:07.328-0400 by distributed lock pinger 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560', sleeping for 30000ms
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.476-0400 2015-07-09T14:19:07.476-0400 I - [main] shell: stopped mongo program on port 30999
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.476-0400 m30998| 2015-07-09T14:19:07.476-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.477-0400 m30998| 2015-07-09T14:19:07.476-0400 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/bs-osx108-8:29000/bs-osx108-8:30998:1436464535:16807'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.477-0400 m30998| 2015-07-09T14:19:07.477-0400 I NETWORK [LockPinger] scoped connection to test-configRS/bs-osx108-8:29000 not being returned to the pool
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.477-0400 m30998| 2015-07-09T14:19:07.477-0400 I SHARDING [signalProcessingThread] dbexit: rc:0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.478-0400 m29000| 2015-07-09T14:19:07.477-0400 I NETWORK [conn12] end connection 127.0.0.1:62549 (54 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.482-0400 m31100| 2015-07-09T14:19:07.481-0400 I NETWORK [conn186] end connection 127.0.0.1:63724 (73 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.482-0400 m31100| 2015-07-09T14:19:07.481-0400 I NETWORK [conn185] end connection 127.0.0.1:63723 (73 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.483-0400 m31100| 2015-07-09T14:19:07.481-0400 I NETWORK [conn178] end connection 127.0.0.1:63716 (71 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.483-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn167] end connection 127.0.0.1:63673 (70 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.484-0400 m29000| 2015-07-09T14:19:07.482-0400 I NETWORK [conn66] end connection 127.0.0.1:63660 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.484-0400 m29000| 2015-07-09T14:19:07.482-0400 I NETWORK [conn72] end connection 127.0.0.1:63672 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.485-0400 m31200| 2015-07-09T14:19:07.482-0400 I NETWORK [conn149] end connection 127.0.0.1:63670 (63 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.485-0400 m31200| 2015-07-09T14:19:07.482-0400 I NETWORK [conn146] end connection 127.0.0.1:63651 (62 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.485-0400 m31200| 2015-07-09T14:19:07.482-0400 I NETWORK [conn139] end connection 127.0.0.1:63334 (61 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.485-0400 m31200| 2015-07-09T14:19:07.482-0400 I NETWORK [conn85] end connection 127.0.0.1:63008 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.486-0400 m31200| 2015-07-09T14:19:07.482-0400 I NETWORK [conn68] end connection 127.0.0.1:62900 (60 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.486-0400 m31200| 2015-07-09T14:19:07.482-0400 I NETWORK [conn79] end connection 127.0.0.1:63002 (58 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.486-0400 m31200| 2015-07-09T14:19:07.483-0400 I NETWORK [conn67] end connection 127.0.0.1:62899 (57 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.486-0400 m31200| 2015-07-09T14:19:07.483-0400 I NETWORK [conn80] end connection 127.0.0.1:63003 (56 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.486-0400 m31200| 2015-07-09T14:19:07.483-0400 I NETWORK [conn52] end connection 127.0.0.1:62805 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.487-0400 m31200| 2015-07-09T14:19:07.483-0400 I NETWORK [conn69] end connection 127.0.0.1:62901 (55 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.487-0400 m31200| 2015-07-09T14:19:07.483-0400 I NETWORK [conn60] end connection 127.0.0.1:62849 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.487-0400 m31202| 2015-07-09T14:19:07.483-0400 I NETWORK [conn14] end connection 127.0.0.1:64203 (11 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.487-0400 m31101| 2015-07-09T14:19:07.483-0400 I NETWORK [conn14] end connection 127.0.0.1:64196 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.488-0400 m31202| 2015-07-09T14:19:07.484-0400 I NETWORK [conn13] end connection 127.0.0.1:64201 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.488-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn181] end connection 127.0.0.1:63719 (70 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.489-0400 m31201| 2015-07-09T14:19:07.483-0400 I NETWORK [conn12] end connection 127.0.0.1:64199 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.489-0400 m31200| 2015-07-09T14:19:07.483-0400 I NETWORK [conn36] end connection 127.0.0.1:62743 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.490-0400 m31101| 2015-07-09T14:19:07.484-0400 I NETWORK [conn13] end connection 127.0.0.1:64195 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.490-0400 m31102| 2015-07-09T14:19:07.485-0400 I NETWORK [conn8] end connection 127.0.0.1:62603 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.490-0400 m29000| 2015-07-09T14:19:07.482-0400 I NETWORK [conn69] end connection 127.0.0.1:63663 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.490-0400 m31202| 2015-07-09T14:19:07.485-0400 I NETWORK [conn8] end connection 127.0.0.1:62607 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.491-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn163] end connection 127.0.0.1:63659 (70 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.491-0400 m31201| 2015-07-09T14:19:07.484-0400 I NETWORK [conn7] end connection 127.0.0.1:62608 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.491-0400 m31200| 2015-07-09T14:19:07.483-0400 I NETWORK [conn65] end connection 127.0.0.1:62864 (53 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.491-0400 m31101| 2015-07-09T14:19:07.484-0400 I NETWORK [conn12] end connection 127.0.0.1:64193 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.491-0400 m31102| 2015-07-09T14:19:07.485-0400 I NETWORK [conn13] end connection 127.0.0.1:64211 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.491-0400 m29000| 2015-07-09T14:19:07.482-0400 I NETWORK [conn62] end connection 127.0.0.1:63647 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn158] end connection 127.0.0.1:63654 (70 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn49] end connection 127.0.0.1:62799 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m31101| 2015-07-09T14:19:07.484-0400 I NETWORK [conn7] end connection 127.0.0.1:62602 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m29000| 2015-07-09T14:19:07.482-0400 I NETWORK [conn65] end connection 127.0.0.1:63650 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn164] end connection 127.0.0.1:63666 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn62] end connection 127.0.0.1:62861 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m29000| 2015-07-09T14:19:07.482-0400 I NETWORK [conn60] end connection 127.0.0.1:63645 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.492-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn159] end connection 127.0.0.1:63655 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn140] end connection 127.0.0.1:63335 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m29000| 2015-07-09T14:19:07.483-0400 I NETWORK [conn68] end connection 127.0.0.1:63662 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn147] end connection 127.0.0.1:63254 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn29] end connection 127.0.0.1:62684 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m29000| 2015-07-09T14:19:07.484-0400 I NETWORK [conn64] end connection 127.0.0.1:63649 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn148] end connection 127.0.0.1:63265 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn145] end connection 127.0.0.1:63641 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.493-0400 m29000| 2015-07-09T14:19:07.484-0400 I NETWORK [conn39] end connection 127.0.0.1:62715 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn146] end connection 127.0.0.1:63253 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn37] end connection 127.0.0.1:62748 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m29000| 2015-07-09T14:19:07.484-0400 I NETWORK [conn43] end connection 127.0.0.1:62929 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn138] end connection 127.0.0.1:63203 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn34] end connection 127.0.0.1:62741 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m29000| 2015-07-09T14:19:07.484-0400 I NETWORK [conn16] end connection 127.0.0.1:62556 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn140] end connection 127.0.0.1:63205 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.494-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn28] end connection 127.0.0.1:62683 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m29000| 2015-07-09T14:19:07.484-0400 I NETWORK [conn10] end connection 127.0.0.1:62547 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m31100| 2015-07-09T14:19:07.482-0400 I NETWORK [conn137] end connection 127.0.0.1:63202 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn50] end connection 127.0.0.1:62800 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m29000| 2015-07-09T14:19:07.484-0400 I NETWORK [conn15] end connection 127.0.0.1:62555 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m31100| 2015-07-09T14:19:07.483-0400 I NETWORK [conn153] end connection 127.0.0.1:63640 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn138] end connection 127.0.0.1:63333 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m29000| 2015-07-09T14:19:07.484-0400 I NETWORK [conn14] end connection 127.0.0.1:62554 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.495-0400 m31100| 2015-07-09T14:19:07.483-0400 I NETWORK [conn133] end connection 127.0.0.1:63197 (69 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.496-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn57] end connection 127.0.0.1:62824 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.496-0400 m29000| 2015-07-09T14:19:07.485-0400 I NETWORK [conn11] end connection 127.0.0.1:62548 (50 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.496-0400 m31100| 2015-07-09T14:19:07.483-0400 I NETWORK [conn73] end connection 127.0.0.1:62809 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.496-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn70] end connection 127.0.0.1:62902 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.496-0400 m29000| 2015-07-09T14:19:07.485-0400 I NETWORK [conn13] end connection 127.0.0.1:62550 (50 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.496-0400 m31100| 2015-07-09T14:19:07.483-0400 I NETWORK [conn67] end connection 127.0.0.1:62798 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.496-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn26] end connection 127.0.0.1:62618 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.497-0400 m31100| 2015-07-09T14:19:07.483-0400 I NETWORK [conn176] end connection 127.0.0.1:63714 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.497-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn25] end connection 127.0.0.1:62617 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.497-0400 m31100| 2015-07-09T14:19:07.483-0400 I NETWORK [conn177] end connection 127.0.0.1:63715 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.497-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn21] end connection 127.0.0.1:62609 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.497-0400 m31100| 2015-07-09T14:19:07.483-0400 I NETWORK [conn166] end connection 127.0.0.1:63668 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn27] end connection 127.0.0.1:62619 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn58] end connection 127.0.0.1:62751 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn38] end connection 127.0.0.1:62749 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn161] end connection 127.0.0.1:63657 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn33] end connection 127.0.0.1:62689 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn139] end connection 127.0.0.1:63204 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn47] end connection 127.0.0.1:62773 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.498-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn136] end connection 127.0.0.1:63201 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31200| 2015-07-09T14:19:07.484-0400 I NETWORK [conn24] end connection 127.0.0.1:62616 (52 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn134] end connection 127.0.0.1:63199 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31200| 2015-07-09T14:19:07.485-0400 I NETWORK [conn55] end connection 127.0.0.1:62821 (51 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn50] end connection 127.0.0.1:62667 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn35] end connection 127.0.0.1:62637 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn49] end connection 127.0.0.1:62666 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn42] end connection 127.0.0.1:62649 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.499-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn27] end connection 127.0.0.1:62615 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn36] end connection 127.0.0.1:62638 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn152] end connection 127.0.0.1:63639 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn25] end connection 127.0.0.1:62613 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn132] end connection 127.0.0.1:63181 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn21] end connection 127.0.0.1:62604 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn39] end connection 127.0.0.1:62641 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn32] end connection 127.0.0.1:62634 (68 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.500-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn66] end connection 127.0.0.1:62797 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.501-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn31] end connection 127.0.0.1:62633 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.501-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn26] end connection 127.0.0.1:62614 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.501-0400 m31100| 2015-07-09T14:19:07.484-0400 I NETWORK [conn24] end connection 127.0.0.1:62612 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:07.501-0400 m31100| 2015-07-09T14:19:07.485-0400 I NETWORK [conn74] end connection 127.0.0.1:62823 (67 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.478-0400 2015-07-09T14:19:08.477-0400 I - [main] shell: stopped mongo program on port 30998
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.478-0400 2015-07-09T14:19:08.478-0400 I - [main] No db started on port: 30000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.478-0400 2015-07-09T14:19:08.478-0400 I - [main] shell: stopped mongo program on port 30000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.478-0400 2015-07-09T14:19:08.478-0400 I - [main] No db started on port: 30001
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.478-0400 2015-07-09T14:19:08.478-0400 I - [main] shell: stopped mongo program on port 30001
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.478-0400 ReplSetTest n: 0 ports: [ 31100, 31101, 31102 ] 31100 number
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.479-0400 ReplSetTest stop *** Shutting down mongod in port 31100 ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.479-0400 m31100| 2015-07-09T14:19:08.479-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.480-0400 m31100| 2015-07-09T14:19:08.479-0400 I REPL [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.503-0400 m31100| 2015-07-09T14:19:08.502-0400 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31100:1436464536:197041335'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.505-0400 m31100| 2015-07-09T14:19:08.504-0400 W SHARDING [LockPinger] Error encountered while stopping ping on bs-osx108-8:31100:1436464536:197041335 :: caused by :: 17382 Can't use connection pool during shutdown
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.505-0400 m31100| 2015-07-09T14:19:08.505-0400 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.505-0400 m31100| 2015-07-09T14:19:08.505-0400 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.506-0400 m31100| 2015-07-09T14:19:08.505-0400 I NETWORK [signalProcessingThread] closing listening socket: 9
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.506-0400 m31100| 2015-07-09T14:19:08.505-0400 I NETWORK [signalProcessingThread] closing listening socket: 10
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.506-0400 m31100| 2015-07-09T14:19:08.505-0400 I NETWORK [signalProcessingThread] closing listening socket: 11
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.506-0400 m31100| 2015-07-09T14:19:08.505-0400 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-31100.sock
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.506-0400 m31100| 2015-07-09T14:19:08.506-0400 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.506-0400 m31100| 2015-07-09T14:19:08.506-0400 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.507-0400 m31100| 2015-07-09T14:19:08.506-0400 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.507-0400 m31100| 2015-07-09T14:19:08.506-0400 I NETWORK [conn2] end connection 127.0.0.1:62486 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.507-0400 m31100| 2015-07-09T14:19:08.506-0400 I NETWORK [conn12] end connection 127.0.0.1:62528 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.508-0400 m31100| 2015-07-09T14:19:08.507-0400 I NETWORK [conn18] end connection 127.0.0.1:62580 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.508-0400 m31101| 2015-07-09T14:19:08.506-0400 I NETWORK [conn3] end connection 127.0.0.1:62484 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.508-0400 m31200| 2015-07-09T14:19:08.507-0400 I NETWORK [conn46] end connection 127.0.0.1:62772 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.509-0400 m31200| 2015-07-09T14:19:08.507-0400 I NETWORK [conn16] end connection 127.0.0.1:62576 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.509-0400 m31100| 2015-07-09T14:19:08.507-0400 I NETWORK [conn78] end connection 127.0.0.1:62961 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.509-0400 m29000| 2015-07-09T14:19:08.507-0400 I NETWORK [conn22] end connection 127.0.0.1:62573 (37 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.509-0400 m31102| 2015-07-09T14:19:08.507-0400 I NETWORK [conn3] end connection 127.0.0.1:62485 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.510-0400 m31100| 2015-07-09T14:19:08.507-0400 I NETWORK [conn197] end connection 127.0.0.1:64081 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.510-0400 m29000| 2015-07-09T14:19:08.507-0400 I NETWORK [conn50] end connection 127.0.0.1:63256 (36 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.511-0400 m31100| 2015-07-09T14:19:08.507-0400 I NETWORK [conn64] end connection 127.0.0.1:62769 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.511-0400 m31202| 2015-07-09T14:19:08.508-0400 I NETWORK [conn7] end connection 127.0.0.1:62574 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.511-0400 m31200| 2015-07-09T14:19:08.508-0400 I NETWORK [conn17] end connection 127.0.0.1:62582 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.511-0400 m29000| 2015-07-09T14:19:08.507-0400 I NETWORK [conn19] end connection 127.0.0.1:62570 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.511-0400 m31100| 2015-07-09T14:19:08.507-0400 I NETWORK [conn3] end connection 127.0.0.1:62487 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.512-0400 m31200| 2015-07-09T14:19:08.508-0400 I NETWORK [conn15] end connection 127.0.0.1:62575 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.512-0400 m29000| 2015-07-09T14:19:08.507-0400 I NETWORK [conn32] end connection 127.0.0.1:62645 (35 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.512-0400 m31100| 2015-07-09T14:19:08.507-0400 I NETWORK [conn8] end connection 127.0.0.1:62519 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.513-0400 m31200| 2015-07-09T14:19:08.508-0400 I NETWORK [conn44] end connection 127.0.0.1:62768 (25 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.513-0400 m29000| 2015-07-09T14:19:08.507-0400 I NETWORK [conn34] end connection 127.0.0.1:62647 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.513-0400 m31100| 2015-07-09T14:19:08.508-0400 I NETWORK [conn19] end connection 127.0.0.1:62581 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.513-0400 m31200| 2015-07-09T14:19:08.508-0400 I NETWORK [conn42] end connection 127.0.0.1:62764 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.513-0400 m29000| 2015-07-09T14:19:08.507-0400 I NETWORK [conn31] end connection 127.0.0.1:62644 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.513-0400 m31201| 2015-07-09T14:19:08.508-0400 I NETWORK [conn9] end connection 127.0.0.1:62719 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.514-0400 m31100| 2015-07-09T14:19:08.508-0400 I NETWORK [conn190] end connection 127.0.0.1:63813 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.514-0400 m29000| 2015-07-09T14:19:08.507-0400 I NETWORK [conn33] end connection 127.0.0.1:62646 (34 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.514-0400 m31100| 2015-07-09T14:19:08.509-0400 I NETWORK [conn189] end connection 127.0.0.1:63811 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.514-0400 m29000| 2015-07-09T14:19:08.508-0400 I NETWORK [conn26] end connection 127.0.0.1:62584 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.514-0400 m31100| 2015-07-09T14:19:08.509-0400 I NETWORK [conn62] end connection 127.0.0.1:62765 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.514-0400 m31200| 2015-07-09T14:19:08.509-0400 I NETWORK [conn45] end connection 127.0.0.1:62770 (23 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.514-0400 m29000| 2015-07-09T14:19:08.508-0400 I NETWORK [conn20] end connection 127.0.0.1:62571 (32 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m31100| 2015-07-09T14:19:08.509-0400 I NETWORK [conn77] end connection 127.0.0.1:62959 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m29000| 2015-07-09T14:19:08.508-0400 I NETWORK [conn21] end connection 127.0.0.1:62572 (31 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m31100| 2015-07-09T14:19:08.509-0400 I NETWORK [conn53] end connection 127.0.0.1:62717 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m29000| 2015-07-09T14:19:08.509-0400 I NETWORK [conn56] end connection 127.0.0.1:63262 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m31102| 2015-07-09T14:19:08.509-0400 I NETWORK [conn10] end connection 127.0.0.1:62716 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m31101| 2015-07-09T14:19:08.509-0400 I NETWORK [conn9] end connection 127.0.0.1:62718 (8 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m31100| 2015-07-09T14:19:08.509-0400 I NETWORK [conn1] end connection 127.0.0.1:62477 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.515-0400 m29000| 2015-07-09T14:19:08.509-0400 I NETWORK [conn30] end connection 127.0.0.1:62643 (27 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m31100| 2015-07-09T14:19:08.509-0400 I NETWORK [conn61] end connection 127.0.0.1:62763 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m29000| 2015-07-09T14:19:08.510-0400 I NETWORK [conn18] end connection 127.0.0.1:62569 (25 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m31100| 2015-07-09T14:19:08.509-0400 I NETWORK [conn63] end connection 127.0.0.1:62767 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m31100| 2015-07-09T14:19:08.510-0400 I NETWORK [conn65] end connection 127.0.0.1:62771 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m31100| 2015-07-09T14:19:08.510-0400 I NETWORK [conn80] end connection 127.0.0.1:62965 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m31100| 2015-07-09T14:19:08.510-0400 I NETWORK [conn79] end connection 127.0.0.1:62963 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m31100| 2015-07-09T14:19:08.510-0400 I NETWORK [conn200] end connection 127.0.0.1:64216 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.516-0400 m31200| 2015-07-09T14:19:08.510-0400 I NETWORK [conn43] end connection 127.0.0.1:62766 (22 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.517-0400 m31100| 2015-07-09T14:19:08.510-0400 I NETWORK [conn75] end connection 127.0.0.1:62868 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.517-0400 m29000| 2015-07-09T14:19:08.510-0400 I NETWORK [conn58] end connection 127.0.0.1:63264 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.517-0400 m29000| 2015-07-09T14:19:08.510-0400 I NETWORK [conn49] end connection 127.0.0.1:63255 (24 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.517-0400 m31100| 2015-07-09T14:19:08.510-0400 I NETWORK [conn196] end connection 127.0.0.1:64032 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.517-0400 m31200| 2015-07-09T14:19:08.510-0400 I NETWORK [conn156] end connection 127.0.0.1:64033 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.517-0400 m31101| 2015-07-09T14:19:08.510-0400 E REPL [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.517-0400 m31100| 2015-07-09T14:19:08.510-0400 I NETWORK [conn76] end connection 127.0.0.1:62957 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m29000| 2015-07-09T14:19:08.510-0400 I NETWORK [conn51] end connection 127.0.0.1:63257 (22 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m31102| 2015-07-09T14:19:08.510-0400 E REPL [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: bs-osx108-8:31100
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m31100| 2015-07-09T14:19:08.511-0400 I NETWORK [conn194] end connection 127.0.0.1:63906 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m29000| 2015-07-09T14:19:08.511-0400 I NETWORK [conn57] end connection 127.0.0.1:63263 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m31200| 2015-07-09T14:19:08.511-0400 I NETWORK [conn151] end connection 127.0.0.1:63814 (20 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m31102| 2015-07-09T14:19:08.511-0400 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m31100| 2015-07-09T14:19:08.511-0400 I NETWORK [conn192] end connection 127.0.0.1:63902 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.518-0400 m31101| 2015-07-09T14:19:08.511-0400 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.519-0400 m29000| 2015-07-09T14:19:08.511-0400 I NETWORK [conn55] end connection 127.0.0.1:63261 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.519-0400 m31200| 2015-07-09T14:19:08.511-0400 I NETWORK [conn150] end connection 127.0.0.1:63812 (20 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.519-0400 m31100| 2015-07-09T14:19:08.511-0400 I NETWORK [conn195] end connection 127.0.0.1:64030 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.520-0400 m29000| 2015-07-09T14:19:08.511-0400 I NETWORK [conn53] end connection 127.0.0.1:63259 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.520-0400 m31200| 2015-07-09T14:19:08.511-0400 I NETWORK [conn157] end connection 127.0.0.1:64082 (19 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.520-0400 m31100| 2015-07-09T14:19:08.512-0400 I NETWORK [conn193] end connection 127.0.0.1:63904 (29 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.520-0400 m29000| 2015-07-09T14:19:08.511-0400 I NETWORK [conn52] end connection 127.0.0.1:63258 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.520-0400 m31200| 2015-07-09T14:19:08.511-0400 I NETWORK [conn155] end connection 127.0.0.1:64031 (18 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.521-0400 m29000| 2015-07-09T14:19:08.511-0400 I NETWORK [conn54] end connection 127.0.0.1:63260 (21 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.521-0400 m31200| 2015-07-09T14:19:08.511-0400 I NETWORK [conn153] end connection 127.0.0.1:63905 (18 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.521-0400 m29000| 2015-07-09T14:19:08.511-0400 I NETWORK [conn73] end connection 127.0.0.1:63981 (20 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.521-0400 m31200| 2015-07-09T14:19:08.511-0400 I NETWORK [conn154] end connection 127.0.0.1:63907 (18 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.521-0400 m31200| 2015-07-09T14:19:08.512-0400 I NETWORK [conn152] end connection 127.0.0.1:63903 (15 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.613-0400 m31100| 2015-07-09T14:19:08.612-0400 I STORAGE [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:08.614-0400 m31100| 2015-07-09T14:19:08.614-0400 I CONTROL [signalProcessingThread] dbexit: rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:09.480-0400 2015-07-09T14:19:09.480-0400 I - [main] shell: stopped mongo program on port 31100
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:09.481-0400 ReplSetTest stop *** Mongod in port 31100 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:09.481-0400 ReplSetTest n: 1 ports: [ 31100, 31101, 31102 ] 31101 number
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:09.481-0400 ReplSetTest stop *** Shutting down mongod in port 31101 ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:09.481-0400 m31101| 2015-07-09T14:19:09.481-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:09.482-0400 m31101| 2015-07-09T14:19:09.482-0400 I REPL [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.027-0400 m31101| 2015-07-09T14:19:10.027-0400 I STORAGE [conn5] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.141-0400 m31102| 2015-07-09T14:19:10.028-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31101; HostUnreachable network error while attempting to run command 'replSetHeartbeat' on host 'bs-osx108-8:31101'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.166-0400 m31101| 2015-07-09T14:19:10.165-0400 I STORAGE [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.237-0400 m31102| 2015-07-09T14:19:10.237-0400 I NETWORK [ReplExecNetThread-3] Socket closed remotely, no longer connected (idle 6 secs, remote host 127.0.0.1:31100)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.238-0400 m31102| 2015-07-09T14:19:10.238-0400 W NETWORK [ReplExecNetThread-3] Failed to connect to 127.0.0.1:31100, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.239-0400 m31102| 2015-07-09T14:19:10.238-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31100; HostUnreachable Failed attempt to connect to bs-osx108-8:31100; couldn't connect to server bs-osx108-8:31100, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.240-0400 m31102| 2015-07-09T14:19:10.239-0400 W NETWORK [ReplExecNetThread-3] Failed to connect to 127.0.0.1:31100, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.240-0400 m31102| 2015-07-09T14:19:10.240-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31100; HostUnreachable Failed attempt to connect to bs-osx108-8:31100; couldn't connect to server bs-osx108-8:31100, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.242-0400 m31102| 2015-07-09T14:19:10.241-0400 W NETWORK [ReplExecNetThread-3] Failed to connect to 127.0.0.1:31100, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.242-0400 m31102| 2015-07-09T14:19:10.242-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31100; HostUnreachable Failed attempt to connect to bs-osx108-8:31100; couldn't connect to server bs-osx108-8:31100, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.242-0400 m31102| 2015-07-09T14:19:10.242-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.313-0400 m31101| 2015-07-09T14:19:10.313-0400 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.314-0400 m31101| 2015-07-09T14:19:10.313-0400 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.314-0400 m31101| 2015-07-09T14:19:10.313-0400 I NETWORK [signalProcessingThread] closing listening socket: 12
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.315-0400 m31101| 2015-07-09T14:19:10.313-0400 I NETWORK [signalProcessingThread] closing listening socket: 13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.315-0400 m31101| 2015-07-09T14:19:10.313-0400 I NETWORK [signalProcessingThread] closing listening socket: 14
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.315-0400 m31101| 2015-07-09T14:19:10.313-0400 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-31101.sock
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.316-0400 m31102| 2015-07-09T14:19:10.313-0400 I NETWORK [ReplExecNetThread-2] Socket recv() errno:54 Connection reset by peer 127.0.0.1:31101
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.316-0400 m31102| 2015-07-09T14:19:10.313-0400 I NETWORK [ReplExecNetThread-3] Socket recv() errno:54 Connection reset by peer 127.0.0.1:31101
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.316-0400 m31101| 2015-07-09T14:19:10.313-0400 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.317-0400 m31101| 2015-07-09T14:19:10.313-0400 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.317-0400 m31102| 2015-07-09T14:19:10.313-0400 I NETWORK [ReplExecNetThread-2] SocketException: remote: 127.0.0.1:31101 error: 9001 socket exception [RECV_ERROR] server [127.0.0.1:31101]
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.317-0400 m31102| 2015-07-09T14:19:10.313-0400 I NETWORK [ReplExecNetThread-3] SocketException: remote: 127.0.0.1:31101 error: 9001 socket exception [RECV_ERROR] server [127.0.0.1:31101]
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.318-0400 m31101| 2015-07-09T14:19:10.314-0400 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.318-0400 m31101| 2015-07-09T14:19:10.314-0400 I NETWORK [conn10] end connection 127.0.0.1:62720 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.318-0400 m31101| 2015-07-09T14:19:10.314-0400 I NETWORK [conn1] end connection 127.0.0.1:62479 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.318-0400 m31102| 2015-07-09T14:19:10.314-0400 I NETWORK [conn5] end connection 127.0.0.1:62493 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.319-0400 m31101| 2015-07-09T14:19:10.314-0400 I NETWORK [conn6] end connection 127.0.0.1:62518 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.319-0400 m31102| 2015-07-09T14:19:10.314-0400 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.319-0400 m31201| 2015-07-09T14:19:10.314-0400 I NETWORK [conn14] end connection 127.0.0.1:64205 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.320-0400 m31201| 2015-07-09T14:19:10.315-0400 I NETWORK [conn13] end connection 127.0.0.1:64204 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.320-0400 m31101| 2015-07-09T14:19:10.314-0400 I NETWORK [conn18] end connection 127.0.0.1:64207 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.320-0400 m31102| 2015-07-09T14:19:10.315-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31101; HostUnreachable Failed attempt to connect to bs-osx108-8:31101; network error while attempting to run command 'isMaster' on host 'bs-osx108-8:31101'
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.320-0400 m31102| 2015-07-09T14:19:10.315-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.321-0400 m31202| 2015-07-09T14:19:10.315-0400 I NETWORK [conn16] end connection 127.0.0.1:64209 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.321-0400 m31101| 2015-07-09T14:19:10.315-0400 I NETWORK [conn16] end connection 127.0.0.1:64202 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.321-0400 m31101| 2015-07-09T14:19:10.315-0400 I NETWORK [conn17] end connection 127.0.0.1:64206 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.322-0400 m31101| 2015-07-09T14:19:10.315-0400 I NETWORK [conn15] end connection 127.0.0.1:64200 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.322-0400 m31202| 2015-07-09T14:19:10.316-0400 I NETWORK [conn15] end connection 127.0.0.1:64208 (6 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.323-0400 m31102| 2015-07-09T14:19:10.316-0400 W NETWORK [ReplExecNetThread-2] Failed to connect to 127.0.0.1:31101, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.324-0400 m31102| 2015-07-09T14:19:10.316-0400 W NETWORK [ReplExecNetThread-3] Failed to connect to 127.0.0.1:31101, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.324-0400 m31102| 2015-07-09T14:19:10.316-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31101; HostUnreachable Failed attempt to connect to bs-osx108-8:31101; couldn't connect to server bs-osx108-8:31101, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.324-0400 m31102| 2015-07-09T14:19:10.317-0400 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.429-0400 m31101| 2015-07-09T14:19:10.428-0400 I STORAGE [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.430-0400 m31101| 2015-07-09T14:19:10.429-0400 I CONTROL [signalProcessingThread] dbexit: rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.482-0400 2015-07-09T14:19:10.482-0400 I - [main] shell: stopped mongo program on port 31101
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.482-0400 ReplSetTest stop *** Mongod in port 31101 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.482-0400 ReplSetTest n: 2 ports: [ 31100, 31101, 31102 ] 31102 number
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.482-0400 ReplSetTest stop *** Shutting down mongod in port 31102 ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.483-0400 m31102| 2015-07-09T14:19:10.482-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:10.484-0400 m31102| 2015-07-09T14:19:10.484-0400 I REPL [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.195-0400 m31102| 2015-07-09T14:19:11.195-0400 I STORAGE [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.314-0400 m31102| 2015-07-09T14:19:11.314-0400 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.314-0400 m31102| 2015-07-09T14:19:11.314-0400 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.315-0400 m31102| 2015-07-09T14:19:11.314-0400 I NETWORK [signalProcessingThread] closing listening socket: 15
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.315-0400 m31102| 2015-07-09T14:19:11.314-0400 I NETWORK [signalProcessingThread] closing listening socket: 16
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.315-0400 m31102| 2015-07-09T14:19:11.314-0400 I NETWORK [signalProcessingThread] closing listening socket: 17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.316-0400 m31102| 2015-07-09T14:19:11.314-0400 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-31102.sock
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.316-0400 m31102| 2015-07-09T14:19:11.314-0400 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.316-0400 m31102| 2015-07-09T14:19:11.314-0400 I NETWORK [signalProcessingThread] shutdown: going to close sockets... [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.317-0400 m31102| 2015-07-09T14:19:11.314-0400 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.317-0400 m31102| 2015-07-09T14:19:11.315-0400 I NETWORK [conn1] end connection 127.0.0.1:62481 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.318-0400 m31102| 2015-07-09T14:19:11.315-0400 I NETWORK [conn9] end connection 127.0.0.1:62669 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.318-0400 m31102| 2015-07-09T14:19:11.315-0400 I NETWORK [conn12] end connection 127.0.0.1:64194 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.318-0400 m31102| 2015-07-09T14:19:11.315-0400 I NETWORK [conn14] end connection 127.0.0.1:64212 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.319-0400 m31102| 2015-07-09T14:19:11.315-0400 I NETWORK [conn7] end connection 127.0.0.1:62579 (4 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.319-0400 m31201| 2015-07-09T14:19:11.315-0400 I NETWORK [conn15] end connection 127.0.0.1:64213 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.319-0400 m31202| 2015-07-09T14:19:11.315-0400 I NETWORK [conn12] end connection 127.0.0.1:64198 (5 connections now open) [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.423-0400 m31102| 2015-07-09T14:19:11.422-0400 I STORAGE [signalProcessingThread] shutdown: removing fs lock... [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.423-0400 m31102| 2015-07-09T14:19:11.423-0400 I CONTROL [signalProcessingThread] dbexit: rc: 0 [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.484-0400 2015-07-09T14:19:11.483-0400 I - [main] shell: stopped mongo program on port 31102 [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.484-0400 ReplSetTest stop *** Mongod in port 31102 shutdown with code (0) *** [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.484-0400 ReplSetTest stopSet deleting all dbpaths [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.494-0400 ReplSetTest stopSet *** Shut down repl set - test worked **** [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.494-0400 ReplSetTest n: 0 ports: [ 31200, 31201, 31202 ] 31200 number [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.495-0400 ReplSetTest stop *** Shutting down mongod in port 31200 *** [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.495-0400 m31200| 2015-07-09T14:19:11.494-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.495-0400 m31200| 2015-07-09T14:19:11.495-0400 I REPL [signalProcessingThread] Stopping replication applier threads [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.769-0400 m31200| 2015-07-09T14:19:11.769-0400 W SHARDING [LockPinger] removing distributed lock ping thread 'test-configRS/bs-osx108-8:29000/bs-osx108-8:31200:1436464537:809424560' [js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.769-0400 m31200| 2015-07-09T14:19:11.769-0400 W SHARDING [LockPinger] Error encountered while stopping ping on bs-osx108-8:31200:1436464537:809424560 :: caused by :: 17382 Can't 
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.770-0400 m31200| 2015-07-09T14:19:11.769-0400 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.770-0400 m31200| 2015-07-09T14:19:11.769-0400 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.770-0400 m31200| 2015-07-09T14:19:11.769-0400 I NETWORK [signalProcessingThread] closing listening socket: 18
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.770-0400 m31200| 2015-07-09T14:19:11.769-0400 I NETWORK [signalProcessingThread] closing listening socket: 19
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.770-0400 m31200| 2015-07-09T14:19:11.769-0400 I NETWORK [signalProcessingThread] closing listening socket: 20
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.771-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-31200.sock
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.771-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.771-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.772-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [conn1] end connection 127.0.0.1:62497 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.772-0400 m31200| 2015-07-09T14:19:11.770-0400 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.772-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [conn2] end connection 127.0.0.1:62506 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.773-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [conn3] end connection 127.0.0.1:62507 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.773-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [conn12] end connection 127.0.0.1:62540 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.773-0400 m31201| 2015-07-09T14:19:11.770-0400 I NETWORK [conn3] end connection 127.0.0.1:62505 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.773-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn27] end connection 127.0.0.1:62586 (15 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.773-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn29] end connection 127.0.0.1:62588 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn25] end connection 127.0.0.1:62583 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn23] end connection 127.0.0.1:62577 (14 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn28] end connection 127.0.0.1:62587 (12 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn24] end connection 127.0.0.1:62578 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m31202| 2015-07-09T14:19:11.771-0400 I NETWORK [conn3] end connection 127.0.0.1:62504 (4 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m31200| 2015-07-09T14:19:11.770-0400 I NETWORK [conn8] end connection 127.0.0.1:62521 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn44] end connection 127.0.0.1:62978 (10 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.774-0400 m31201| 2015-07-09T14:19:11.771-0400 E REPL [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m31202| 2015-07-09T14:19:11.771-0400 E REPL [rsBackgroundSync] sync producer problem: 10278 dbclient error communicating with server: bs-osx108-8:31200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m31200| 2015-07-09T14:19:11.771-0400 I NETWORK [conn76] end connection 127.0.0.1:62962 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn47] end connection 127.0.0.1:62981 (9 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m31201| 2015-07-09T14:19:11.772-0400 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m31202| 2015-07-09T14:19:11.771-0400 I NETWORK [conn10] end connection 127.0.0.1:62926 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m31200| 2015-07-09T14:19:11.771-0400 I NETWORK [conn77] end connection 127.0.0.1:62964 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m29000| 2015-07-09T14:19:11.771-0400 I NETWORK [conn46] end connection 127.0.0.1:62980 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.775-0400 m31202| 2015-07-09T14:19:11.772-0400 I REPL [ReplicationExecutor] could not find member to sync from
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m31200| 2015-07-09T14:19:11.771-0400 I NETWORK [conn78] end connection 127.0.0.1:62966 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m29000| 2015-07-09T14:19:11.772-0400 I NETWORK [conn45] end connection 127.0.0.1:62979 (7 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m31200| 2015-07-09T14:19:11.772-0400 I NETWORK [conn73] end connection 127.0.0.1:62927 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m31201| 2015-07-09T14:19:11.772-0400 I NETWORK [conn10] end connection 127.0.0.1:62928 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m29000| 2015-07-09T14:19:11.772-0400 I NETWORK [conn41] end connection 127.0.0.1:62865 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m31200| 2015-07-09T14:19:11.772-0400 I NETWORK [conn74] end connection 127.0.0.1:62958 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m29000| 2015-07-09T14:19:11.772-0400 I NETWORK [conn42] end connection 127.0.0.1:62866 (5 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.776-0400 m31200| 2015-07-09T14:19:11.772-0400 I NETWORK [conn75] end connection 127.0.0.1:62960 (13 connections now open)
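The two "sync producer problem: 10278" errors above show what happens on the surviving secondaries when their primary on 31200 exits: the background sync thread loses its oplog source mid-stream, and the subsequent reselection finds no usable member. A hedged JavaScript sketch of that reselection (hypothetical simplification, not the server's real criteria):

    // Hedged sketch of sync-source reselection, not the server's real code.
    function chooseSyncSource(members) {
        // A secondary normally syncs from a reachable member that is ahead
        // of it; reachability alone stands in for the full criteria here.
        var candidates = members.filter(function(m) { return m.up; });
        if (candidates.length === 0) {
            print("I REPL [ReplicationExecutor] could not find member to sync from");
            return null;
        }
        return candidates[0].host;
    }
    // After the primary on 31200 exits, its set offers no usable source:
    chooseSyncSource([{host: "bs-osx108-8:31200", up: false}]);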
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.777-0400 m29000| 2015-07-09T14:19:11.772-0400 I NETWORK [conn48] end connection 127.0.0.1:63009 (3 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.777-0400 m31200| 2015-07-09T14:19:11.772-0400 I NETWORK [conn159] end connection 127.0.0.1:64215 (13 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.876-0400 m31200| 2015-07-09T14:19:11.875-0400 I STORAGE [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:11.876-0400 m31200| 2015-07-09T14:19:11.876-0400 I CONTROL [signalProcessingThread] dbexit: rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.297-0400 m31202| 2015-07-09T14:19:12.297-0400 I NETWORK [ReplExecNetThread-1] Socket closed remotely, no longer connected (idle 6 secs, remote host 127.0.0.1:31200)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.298-0400 m31201| 2015-07-09T14:19:12.297-0400 I NETWORK [ReplExecNetThread-0] Socket closed remotely, no longer connected (idle 6 secs, remote host 127.0.0.1:31200)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.298-0400 m31202| 2015-07-09T14:19:12.298-0400 W NETWORK [ReplExecNetThread-1] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.298-0400 m31201| 2015-07-09T14:19:12.298-0400 W NETWORK [ReplExecNetThread-0] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.299-0400 m31202| 2015-07-09T14:19:12.298-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31200; HostUnreachable Failed attempt to connect to bs-osx108-8:31200; couldn't connect to server bs-osx108-8:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.299-0400 m31201| 2015-07-09T14:19:12.298-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31200; HostUnreachable Failed attempt to connect to bs-osx108-8:31200; couldn't connect to server bs-osx108-8:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.299-0400 m31202| 2015-07-09T14:19:12.299-0400 W NETWORK [ReplExecNetThread-1] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.300-0400 m31201| 2015-07-09T14:19:12.299-0400 W NETWORK [ReplExecNetThread-3] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.300-0400 m31202| 2015-07-09T14:19:12.299-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31200; HostUnreachable Failed attempt to connect to bs-osx108-8:31200; couldn't connect to server bs-osx108-8:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.300-0400 m31201| 2015-07-09T14:19:12.299-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31200; HostUnreachable Failed attempt to connect to bs-osx108-8:31200; couldn't connect to server bs-osx108-8:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.300-0400 m31202| 2015-07-09T14:19:12.300-0400 W NETWORK [ReplExecNetThread-1] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.301-0400 m31201| 2015-07-09T14:19:12.300-0400 W NETWORK [ReplExecNetThread-0] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.301-0400 m31202| 2015-07-09T14:19:12.300-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31200; HostUnreachable Failed attempt to connect to bs-osx108-8:31200; couldn't connect to server bs-osx108-8:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.301-0400 m31202| 2015-07-09T14:19:12.300-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.302-0400 m31201| 2015-07-09T14:19:12.300-0400 I REPL [ReplicationExecutor] Error in heartbeat request to bs-osx108-8:31200; HostUnreachable Failed attempt to connect to bs-osx108-8:31200; couldn't connect to server bs-osx108-8:31200, connection attempt failed
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.302-0400 m31201| 2015-07-09T14:19:12.301-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.302-0400 m31202| 2015-07-09T14:19:12.302-0400 I REPL [ReplicationExecutor] possible election tie; sleeping 207ms until 2015-07-09T14:19:12.509-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.303-0400 m31201| 2015-07-09T14:19:12.302-0400 I REPL [ReplicationExecutor] possible election tie; sleeping 232ms until 2015-07-09T14:19:12.534-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.496-0400 2015-07-09T14:19:12.496-0400 I - [main] shell: stopped mongo program on port 31200
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.497-0400 ReplSetTest stop *** Mongod in port 31200 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.497-0400 ReplSetTest n: 1 ports: [ 31200, 31201, 31202 ] 31201 number
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.497-0400 ReplSetTest stop *** Shutting down mongod in port 31201 ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.497-0400 m31201| 2015-07-09T14:19:12.497-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.498-0400 m31201| 2015-07-09T14:19:12.497-0400 I REPL [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.510-0400 m31202| 2015-07-09T14:19:12.510-0400 I REPL [ReplicationExecutor] Standing for election
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.511-0400 m31201| 2015-07-09T14:19:12.510-0400 I STORAGE [conn5] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.511-0400 m31202| 2015-07-09T14:19:12.511-0400 I REPL [ReplicationExecutor] not electing self, we could not contact enough voting members
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:12.666-0400 m31201| 2015-07-09T14:19:12.666-0400 I STORAGE [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.321-0400 m31201| 2015-07-09T14:19:13.320-0400 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.321-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
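The election exchange above is worth unpacking: both surviving secondaries stand for election at once, detect the tie, and back off for a random interval; by the time 31202 retries, 31201 is itself shutting down, so 31202 cannot reach a majority of the three voting members and gives up. A rough JavaScript sketch of that decision (protocol details elided, names hypothetical; not the ReplicationExecutor's actual implementation):

    // Rough sketch of the election outcome seen in the lines above.
    function standForElection(reachableVoters, totalVoters, peerAlsoStanding) {
        if (peerAlsoStanding) {
            // Both secondaries stood simultaneously; back off a random
            // interval (the "possible election tie; sleeping 207ms/232ms" lines).
            var backoffMs = Math.floor(Math.random() * 1000);
            print("possible election tie; sleeping " + backoffMs + "ms");
            return "retry";
        }
        // Winning requires votes from a strict majority of voting members.
        if (reachableVoters * 2 <= totalVoters) {
            print("not electing self, we could not contact enough voting members");
            return "abort";
        }
        return "elected";
    }
    standForElection(2, 3, true);   // first attempt: tie with the other secondary
    standForElection(1, 3, false);  // retry after 31201 stops: only self reachable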
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.321-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [signalProcessingThread] closing listening socket: 21
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.322-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [signalProcessingThread] closing listening socket: 22
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.322-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [signalProcessingThread] closing listening socket: 23
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.322-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-31201.sock
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.322-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.323-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.323-0400 m31201| 2015-07-09T14:19:13.321-0400 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.323-0400 m31201| 2015-07-09T14:19:13.321-0400 I NETWORK [conn1] end connection 127.0.0.1:62499 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.323-0400 m31202| 2015-07-09T14:19:13.321-0400 I NETWORK [conn5] end connection 127.0.0.1:62513 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.324-0400 m31201| 2015-07-09T14:19:13.322-0400 I NETWORK [conn6] end connection 127.0.0.1:62520 (1 connection now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.422-0400 m31201| 2015-07-09T14:19:13.422-0400 I STORAGE [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.423-0400 m31201| 2015-07-09T14:19:13.422-0400 I CONTROL [signalProcessingThread] dbexit: rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.498-0400 2015-07-09T14:19:13.498-0400 I - [main] shell: stopped mongo program on port 31201
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.498-0400 ReplSetTest stop *** Mongod in port 31201 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.499-0400 ReplSetTest n: 2 ports: [ 31200, 31201, 31202 ] 31202 number
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.499-0400 ReplSetTest stop *** Shutting down mongod in port 31202 ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.499-0400 m31202| 2015-07-09T14:19:13.498-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.499-0400 m31202| 2015-07-09T14:19:13.498-0400 I REPL [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.782-0400 m31202| 2015-07-09T14:19:13.782-0400 I STORAGE [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.804-0400 2015-07-09T14:19:13.803-0400 I NETWORK [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 127.0.0.1:31100)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.805-0400 2015-07-09T14:19:13.805-0400 W NETWORK [ReplicaSetMonitorWatcher] Failed to connect to 127.0.0.1:31100, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.811-0400 2015-07-09T14:19:13.810-0400 I NETWORK [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 127.0.0.1:31101)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.812-0400 2015-07-09T14:19:13.812-0400 W NETWORK [ReplicaSetMonitorWatcher] Failed to connect to 127.0.0.1:31101, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.812-0400 2015-07-09T14:19:13.812-0400 I NETWORK [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 127.0.0.1:31102)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.814-0400 2015-07-09T14:19:13.813-0400 W NETWORK [ReplicaSetMonitorWatcher] Failed to connect to 127.0.0.1:31102, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.814-0400 2015-07-09T14:19:13.813-0400 W NETWORK [ReplicaSetMonitorWatcher] No primary detected for set test-rs0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.814-0400 2015-07-09T14:19:13.813-0400 I NETWORK [ReplicaSetMonitorWatcher] All nodes for set test-rs0 are down. This has happened for 1 checks in a row. Polling will stop after 29 more failed checks
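The ReplicaSetMonitorWatcher lines above show the shell's client-side bookkeeping: once every node in a monitored set is unreachable for an entire scan, a consecutive-failure counter starts, and monitoring stops after 30 failed scans in a row (hence "29 more"). A JavaScript sketch of that budget (the constant and field names are assumptions, not the shell's source):

    // Sketch of the monitor's failure budget implied by the log lines.
    var monitor = { setName: "test-rs0", consecutiveFailed: 0, maxConsecutiveFailed: 30 };
    function recordScan(sawAnyNode) {
        if (sawAnyNode) {
            monitor.consecutiveFailed = 0;  // any contact resets the budget
            return true;
        }
        monitor.consecutiveFailed++;
        var remaining = monitor.maxConsecutiveFailed - monitor.consecutiveFailed;
        print("All nodes for set " + monitor.setName + " are down. This has happened for " +
              monitor.consecutiveFailed + " checks in a row. Polling will stop after " +
              remaining + " more failed checks");
        return remaining > 0;  // false would stop the polling thread
    }
    recordScan(false);  // reproduces the "1 checks in a row ... 29 more" message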
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.815-0400 2015-07-09T14:19:13.814-0400 I NETWORK [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 127.0.0.1:31200)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.816-0400 2015-07-09T14:19:13.816-0400 W NETWORK [ReplicaSetMonitorWatcher] Failed to connect to 127.0.0.1:31200, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.816-0400 2015-07-09T14:19:13.816-0400 I NETWORK [ReplicaSetMonitorWatcher] Socket closed remotely, no longer connected (idle 10 secs, remote host 127.0.0.1:31201)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.817-0400 2015-07-09T14:19:13.817-0400 W NETWORK [ReplicaSetMonitorWatcher] Failed to connect to 127.0.0.1:31201, reason: errno:61 Connection refused
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.818-0400 m31202| 2015-07-09T14:19:13.818-0400 I STORAGE [conn9] got request after shutdown()
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.820-0400 2015-07-09T14:19:13.820-0400 I NETWORK [ReplicaSetMonitorWatcher] Detected bad connection created at 1436464542786411 microSec, clearing pool for bs-osx108-8:31202 of 0 connections
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.821-0400 2015-07-09T14:19:13.820-0400 W NETWORK [ReplicaSetMonitorWatcher] No primary detected for set test-rs1
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:13.821-0400 2015-07-09T14:19:13.820-0400 I NETWORK [ReplicaSetMonitorWatcher] All nodes for set test-rs1 are down. This has happened for 1 checks in a row. Polling will stop after 29 more failed checks
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.328-0400 m31202| 2015-07-09T14:19:14.327-0400 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.328-0400 m31202| 2015-07-09T14:19:14.327-0400 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.328-0400 m31202| 2015-07-09T14:19:14.327-0400 I NETWORK [signalProcessingThread] closing listening socket: 24
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.329-0400 m31202| 2015-07-09T14:19:14.327-0400 I NETWORK [signalProcessingThread] closing listening socket: 25
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.329-0400 m31202| 2015-07-09T14:19:14.328-0400 I NETWORK [signalProcessingThread] closing listening socket: 26
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.329-0400 m31202| 2015-07-09T14:19:14.328-0400 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-31202.sock
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.329-0400 m31202| 2015-07-09T14:19:14.328-0400 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.329-0400 m31202| 2015-07-09T14:19:14.328-0400 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.330-0400 m31202| 2015-07-09T14:19:14.328-0400 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.330-0400 m31202| 2015-07-09T14:19:14.328-0400 I NETWORK [conn1] end connection 127.0.0.1:62501 (0 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.429-0400 m31202| 2015-07-09T14:19:14.429-0400 I STORAGE [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.429-0400 m31202| 2015-07-09T14:19:14.429-0400 I CONTROL [signalProcessingThread] dbexit: rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.500-0400 2015-07-09T14:19:14.499-0400 I - [main] shell: stopped mongo program on port 31202
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.500-0400 ReplSetTest stop *** Mongod in port 31202 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.500-0400 ReplSetTest stopSet deleting all dbpaths
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.509-0400 ReplSetTest stopSet *** Shut down repl set - test worked ****
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.509-0400 ReplSetTest n: 0 ports: [ 29000 ] 29000 number
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.509-0400 ReplSetTest stop *** Shutting down mongod in port 29000 ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.509-0400 m29000| 2015-07-09T14:19:14.509-0400 I CONTROL [signalProcessingThread] got signal 15 (Terminated: 15), will terminate after current cmd ends
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.510-0400 m29000| 2015-07-09T14:19:14.509-0400 I REPL [signalProcessingThread] Stopping replication applier threads
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.701-0400 m29000| 2015-07-09T14:19:14.701-0400 I CONTROL [signalProcessingThread] now exiting
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.702-0400 m29000| 2015-07-09T14:19:14.701-0400 I NETWORK [signalProcessingThread] shutdown: going to close listening sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.702-0400 m29000| 2015-07-09T14:19:14.701-0400 I NETWORK [signalProcessingThread] closing listening socket: 32
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.702-0400 m29000| 2015-07-09T14:19:14.701-0400 I NETWORK [signalProcessingThread] closing listening socket: 33
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.702-0400 m29000| 2015-07-09T14:19:14.701-0400 I NETWORK [signalProcessingThread] closing listening socket: 34
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.702-0400 m29000| 2015-07-09T14:19:14.702-0400 I NETWORK [signalProcessingThread] removing socket file: /tmp/mongodb-29000.sock
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.703-0400 m29000| 2015-07-09T14:19:14.702-0400 I NETWORK [signalProcessingThread] shutdown: going to flush diaglog...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.703-0400 m29000| 2015-07-09T14:19:14.702-0400 I NETWORK [signalProcessingThread] shutdown: going to close sockets...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.703-0400 m29000| 2015-07-09T14:19:14.702-0400 I STORAGE [signalProcessingThread] WiredTigerKVEngine shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.704-0400 m29000| 2015-07-09T14:19:14.702-0400 I NETWORK [conn2] end connection 127.0.0.1:62531 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.704-0400 m29000| 2015-07-09T14:19:14.703-0400 I NETWORK [conn35] end connection 127.0.0.1:62690 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.704-0400 m29000| 2015-07-09T14:19:14.703-0400 I NETWORK [conn1] end connection 127.0.0.1:62525 (2 connections now open)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.841-0400 m29000| 2015-07-09T14:19:14.841-0400 I STORAGE [WiredTigerRecordStoreThread for local.oplog.rs] shutting down
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.888-0400 m29000| 2015-07-09T14:19:14.887-0400 I STORAGE [signalProcessingThread] shutdown: removing fs lock...
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:14.889-0400 m29000| 2015-07-09T14:19:14.889-0400 I CONTROL [signalProcessingThread] dbexit: rc: 0
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.511-0400 2015-07-09T14:19:15.510-0400 I - [main] shell: stopped mongo program on port 29000
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.511-0400 ReplSetTest stop *** Mongod in port 29000 shutdown with code (0) ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.511-0400 ReplSetTest stopSet deleting all dbpaths
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.514-0400 ReplSetTest stopSet *** Shut down repl set - test worked ****
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.514-0400 *** ShardingTest test completed successfully in 1439.402 seconds ***
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400 2015-07-09T14:19:15.516-0400 E QUERY [main] Error: 4 threads threw 2 different exceptions:
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400 3 threads threw
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400 Error: [632] != [1000] are not equal : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400 at quietlyDoAssert (jstests/concurrency/fsm_libs/assert.js:53:15)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400 at Function.assert.eq (src/mongo/shell/assert.js:38:5)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400 at wrapAssertFn (jstests/concurrency/fsm_libs/assert.js:60:16)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.517-0400 at Function.assertWithLevel.(anonymous function) [as eq] (jstests/concurrency/fsm_libs/assert.js:99:13)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.518-0400 at Object.query (jstests/concurrency/fsm_workloads/agg_base.js:44:31)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.518-0400 at Object.runFSM [as run] (jstests/concurrency/fsm_libs/fsm.js:19:16)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.518-0400 at :8:13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.518-0400 at Object.main (jstests/concurrency/fsm_libs/worker_thread.js:81:17)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.518-0400 at ____MongoToV8_newFunction_temp (:5:25)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.518-0400 at ____MongoToV8_newFunction_temp (:3:24)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400 1 thread threw
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400 Error: [182] != [1000] are not equal : undefined
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400 at quietlyDoAssert (jstests/concurrency/fsm_libs/assert.js:53:15)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400 at Function.assert.eq (src/mongo/shell/assert.js:38:5)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400 at wrapAssertFn (jstests/concurrency/fsm_libs/assert.js:60:16)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400 at Function.assertWithLevel.(anonymous function) [as eq] (jstests/concurrency/fsm_libs/assert.js:99:13)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.519-0400 at Object.query (jstests/concurrency/fsm_workloads/agg_base.js:44:31)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400 at Object.runFSM [as run] (jstests/concurrency/fsm_libs/fsm.js:19:16)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400 at :8:13
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400 at Object.main (jstests/concurrency/fsm_libs/worker_thread.js:81:17)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400 at ____MongoToV8_newFunction_temp (:5:25)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400 at ____MongoToV8_newFunction_temp (:3:24)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400 at throwError (jstests/concurrency/fsm_libs/runner.js:268:23)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.520-0400 at jstests/concurrency/fsm_libs/runner.js:396:17
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.521-0400 at Array.forEach (native)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.521-0400 at runWorkloads (jstests/concurrency/fsm_libs/runner.js:351:22)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.521-0400 at serial (jstests/concurrency/fsm_libs/runner.js:415:13)
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.521-0400 at jstests/concurrency/fsm_all_sharded_replication.js:68:1 at jstests/concurrency/fsm_libs/runner.js:280
[js_test:fsm_all_sharded_replication] 2015-07-09T14:19:15.521-0400 failed to load: jstests/concurrency/fsm_all_sharded_replication.js
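To summarize the failure: the cluster itself tore down cleanly (every mongod exited with code 0 and ShardingTest reported success), but four FSM worker threads had already failed the same check at agg_base.js:44, seeing 632 or 182 documents where 1000 were expected, and the runner's throwError groups identical stack traces and reports them only after teardown; the closing "failed to load" line is the shell's generic exit message when a test script throws. A paraphrased JavaScript reconstruction of that check (the state name matches the stack trace; the body is an assumption, not the verbatim workload source):

    // Paraphrased sketch of the assertion behind "[632] != [1000] are not equal".
    var query = function query(db, collName) {
        var numDocs = 1000;  // documents inserted during workload setup
        var count = db[collName].aggregate([]).itcount();
        // No message argument, matching the ": undefined" suffix in the log.
        // Fails when the aggregation cursor misses documents, e.g. while
        // chunks are migrating between shards during the workload.
        assert.eq(numDocs, count);
    };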